core/stdarch/crates/core_arch/src/aarch64/neon/
generated.rs

1// This code is automatically generated. DO NOT MODIFY.
2//
3// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
4//
5// ```
6// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
7// ```
8#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // Thin wrapper over the LLVM CRC32-C intrinsic; `extern "unadjusted"`
    // keeps the signature exactly as LLVM declares it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    // SAFETY: by-value scalar call; the `crc` feature is enabled by the attribute above.
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // Thin wrapper over the LLVM CRC32 intrinsic (plain polynomial, unlike
    // the `crc32cx` Castagnoli variant above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    // SAFETY: by-value scalar call; the `crc` feature is enabled by the attribute above.
    unsafe { ___crc32d(crc, data) }
}
#[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"]
#[inline]
#[target_feature(enable = "jsconv")]
#[cfg_attr(test, assert_instr(fjcvtzs))]
#[unstable(feature = "stdarch_aarch64_jscvt", issue = "147555")]
pub fn __jcvt(a: f64) -> i32 {
    // Binds the FJCVTZS instruction via its LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.fjcvtzs"
        )]
        fn ___jcvt(a: f64) -> i32;
    }
    // SAFETY: by-value scalar call; the `jsconv` feature is enabled above.
    unsafe { ___jcvt(a) }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Take the high halves (lanes 8..16) of `b` and `c`.
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        // Reinterpret the absolute difference as unsigned so the widening
        // `simd_cast` below zero-extends rather than sign-extends.
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Take the high halves (lanes 4..8) of `b` and `c`.
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        // Unsigned reinterpretation makes the widening cast a zero-extension.
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Take the high halves (lanes 2..4) of `b` and `c`.
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        // Unsigned reinterpretation makes the widening cast a zero-extension.
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Take the high halves (lanes 8..16), widen the absolute difference,
        // and accumulate into `a`. Already unsigned, so no reinterpret step.
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // High halves (lanes 4..8) → absolute difference → widen → accumulate.
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // High halves (lanes 2..4) → absolute difference → widen → accumulate.
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Binds the FABD instruction via the `fabd.v1f64` LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Quad-register (128-bit) variant of `vabd_f64`, via `fabd.v2f64`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: splat both operands to 1-lane vectors, reuse the vector
    // intrinsic, then extract lane 0.
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar form: splat to vectors, use the vector intrinsic, take lane 0.
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Half-precision scalar form; excluded on arm64ec (see `cfg` above).
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // High halves (lanes 4..8) of both inputs.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        // Reinterpret unsigned so the final widening cast zero-extends.
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // High halves (lanes 2..4) of both inputs.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        // Reinterpret unsigned so the final widening cast zero-extends.
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // High halves (lanes 8..16) of both inputs.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Reinterpret unsigned so the final widening cast zero-extends.
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // High halves (lanes 8..16), then widen the absolute difference.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // High halves (lanes 4..8), then widen the absolute difference.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // High halves (lanes 2..4), then widen the absolute difference.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise |x| via the portable SIMD intrinsic; lowers to FABS.
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // Quad-register variant of `vabs_f64`.
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe {
        // abs(x) as select(x >= -x, x, -x); per the doc summary the result
        // wraps (i64::MIN stays i64::MIN).
        let neg: int64x1_t = simd_neg(a);
        let mask: int64x1_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe {
        // Quad-register variant: abs(x) as select(x >= -x, x, -x), wrapping.
        let neg: int64x2_t = simd_neg(a);
        let mask: int64x2_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    // Scalar form, bound directly to the LLVM `abs.i64` NEON intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    // SAFETY: by-value scalar call; `neon` is enabled by the attribute above.
    unsafe { _vabsd_s64(a) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain wrapping scalar add; no dedicated instruction is expected
    // (hence `assert_instr(nop)`).
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Unsigned counterpart of `vaddd_s64`: plain wrapping scalar add.
    a.wrapping_add(b)
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Horizontal widening add of all four i16 lanes into an i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // Horizontal widening add of all eight i16 lanes into an i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // Horizontal widening add of all four i32 lanes into an i64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    // Two-lane case: the codegen for `saddlv.i64.v2i32` is a pairwise add,
    // hence `assert_instr(saddlp)` rather than `saddlv` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // The LLVM intrinsic returns i32; the ACLE signature is int16_t, so the
    // result is narrowed with `as i16` below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // LLVM returns i32; narrowed to the ACLE int16_t result with `as i16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Horizontal widening add of all four u16 lanes into a u32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // Horizontal widening add of all eight u16 lanes into a u32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // Horizontal widening add of all four u32 lanes into a u64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    // Two-lane case: codegen for `uaddlv.i64.v2i32` is a pairwise add,
    // hence `assert_instr(uaddlp)` rather than `uaddlv` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // The LLVM intrinsic is declared with an i32 result; it is narrowed to
    // the ACLE uint16_t return with `as u16` below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // LLVM result is i32; narrowed to the ACLE uint16_t return with `as u16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    // Horizontal float add of both lanes; lowers to FADDP (pairwise add).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    // Horizontal float add of all four lanes via the `faddv` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    // Horizontal double add of both lanes; lowers to FADDP.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: by-value vector call; `neon` is enabled by the attribute above.
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    // Horizontal add of both lanes, initial accumulator 0; lowers to ADDP.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    // Horizontal add of both lanes, initial accumulator 0; lowers to ADDP.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    // Horizontal add of all lanes, initial accumulator 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // Two 64-bit lanes: horizontal add lowers to ADDP (pairwise add).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    // Two 64-bit lanes: horizontal add lowers to ADDP (pairwise add).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Binds the FEAT_FAMINMAX `famax` LLVM intrinsic (lane-wise max of
    // absolute values).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: by-value vector call; `neon,faminmax` are enabled above.
    unsafe { _vamax_f32(a, b) }
}
751#[doc = "Multi-vector floating-point absolute maximum"]
752#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
753#[inline]
754#[target_feature(enable = "neon,faminmax")]
755#[cfg_attr(test, assert_instr(nop))]
756#[unstable(feature = "faminmax", issue = "137933")]
757pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
758    unsafe extern "unadjusted" {
759        #[cfg_attr(
760            any(target_arch = "aarch64", target_arch = "arm64ec"),
761            link_name = "llvm.aarch64.neon.famax.v4f32"
762        )]
763        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
764    }
765    unsafe { _vamaxq_f32(a, b) }
766}
767#[doc = "Multi-vector floating-point absolute maximum"]
768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
769#[inline]
770#[target_feature(enable = "neon,faminmax")]
771#[cfg_attr(test, assert_instr(nop))]
772#[unstable(feature = "faminmax", issue = "137933")]
773pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
774    unsafe extern "unadjusted" {
775        #[cfg_attr(
776            any(target_arch = "aarch64", target_arch = "arm64ec"),
777            link_name = "llvm.aarch64.neon.famax.v2f64"
778        )]
779        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
780    }
781    unsafe { _vamaxq_f64(a, b) }
782}
783#[doc = "Multi-vector floating-point absolute minimum"]
784#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
785#[inline]
786#[target_feature(enable = "neon,faminmax")]
787#[cfg_attr(test, assert_instr(nop))]
788#[unstable(feature = "faminmax", issue = "137933")]
789pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
790    unsafe extern "unadjusted" {
791        #[cfg_attr(
792            any(target_arch = "aarch64", target_arch = "arm64ec"),
793            link_name = "llvm.aarch64.neon.famin.v2f32"
794        )]
795        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
796    }
797    unsafe { _vamin_f32(a, b) }
798}
799#[doc = "Multi-vector floating-point absolute minimum"]
800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
801#[inline]
802#[target_feature(enable = "neon,faminmax")]
803#[cfg_attr(test, assert_instr(nop))]
804#[unstable(feature = "faminmax", issue = "137933")]
805pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
806    unsafe extern "unadjusted" {
807        #[cfg_attr(
808            any(target_arch = "aarch64", target_arch = "arm64ec"),
809            link_name = "llvm.aarch64.neon.famin.v4f32"
810        )]
811        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
812    }
813    unsafe { _vaminq_f32(a, b) }
814}
815#[doc = "Multi-vector floating-point absolute minimum"]
816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
817#[inline]
818#[target_feature(enable = "neon,faminmax")]
819#[cfg_attr(test, assert_instr(nop))]
820#[unstable(feature = "faminmax", issue = "137933")]
821pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
822    unsafe extern "unadjusted" {
823        #[cfg_attr(
824            any(target_arch = "aarch64", target_arch = "arm64ec"),
825            link_name = "llvm.aarch64.neon.famin.v2f64"
826        )]
827        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
828    }
829    unsafe { _vaminq_f64(a, b) }
830}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Declaration of the LLVM BCAX intrinsic (signed variant, 16x i8 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Declaration of the LLVM BCAX intrinsic (signed variant, 8x i16 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Declaration of the LLVM BCAX intrinsic (signed variant, 4x i32 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Declaration of the LLVM BCAX intrinsic (signed variant, 2x i64 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Declaration of the LLVM BCAX intrinsic (unsigned variant, 16x i8 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Declaration of the LLVM BCAX intrinsic (unsigned variant, 8x i16 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Declaration of the LLVM BCAX intrinsic (unsigned variant, 4x i32 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Declaration of the LLVM BCAX intrinsic (unsigned variant, 2x i64 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Declaration of the LLVM FCADD (rotate 270) intrinsic for 4x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Declaration of the LLVM FCADD (rotate 270) intrinsic for 8x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Declaration of the LLVM FCADD (rotate 270) intrinsic for 2x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Declaration of the LLVM FCADD (rotate 270) intrinsic for 4x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM FCADD (rotate 270) intrinsic for 2x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Declaration of the LLVM FCADD (rotate 90) intrinsic for 4x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Declaration of the LLVM FCADD (rotate 90) intrinsic for 8x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Declaration of the LLVM FCADD (rotate 90) intrinsic for 2x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Declaration of the LLVM FCADD (rotate 90) intrinsic for 4x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM FCADD (rotate 90) intrinsic for 2x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Declaration of the LLVM FACGE intrinsic for 1x f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Declaration of the LLVM FACGE intrinsic for 2x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    // Declaration of the scalar LLVM FACGE intrinsic (f64 -> u64 mask).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    // Declaration of the scalar LLVM FACGE intrinsic (f32 -> u32 mask).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    // Declaration of the scalar LLVM FACGE intrinsic for f16; note it is
    // declared to return an i32 rather than a u16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    // The i32 result is truncated to u16 (comparison results are expected to
    // be all-ones/all-zeros masks, so the low 16 bits carry the answer).
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Declaration of the LLVM FACGT intrinsic for 1x f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Declaration of the LLVM FACGT intrinsic for 2x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    // Declaration of the scalar LLVM FACGT intrinsic (f64 -> u64 mask).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    // Declaration of the scalar LLVM FACGT intrinsic (f32 -> u32 mask).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target feature is enabled on this function.
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    // Declaration of the scalar LLVM FACGT intrinsic for f16; note it is
    // declared to return an i32 rather than a u16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: call to the intrinsic declared above with an identical
    // signature; the required target features are enabled on this function.
    // The i32 result is truncated to u16 (comparison results are expected to
    // be all-ones/all-zeros masks, so the low 16 bits carry the answer).
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is |b| >= |a|: delegate to the FACGE-based greater-than-or-
    // equal intrinsic with the operands swapped.
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| <= |b| is |b| >= |a|: delegate with the operands swapped.
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // |a| <= |b| is |b| >= |a|: delegate with the operands swapped.
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // |a| <= |b| is |b| >= |a|: delegate with the operands swapped.
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // |a| <= |b| is |b| >= |a|: delegate with the operands swapped.
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is |b| > |a|: delegate to the FACGT-based greater-than
    // intrinsic with the operands swapped.
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| < |b| is |b| > |a|: delegate with the operands swapped.
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    // |a| < |b| is |b| > |a|: delegate with the operands swapped.
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    // |a| < |b| is |b| > |a|: delegate with the operands swapped.
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    // |a| < |b| is |b| > |a|: delegate with the operands swapped.
    vcagth_f16(b, a)
}
1381#[doc = "Floating-point compare equal"]
1382#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
1383#[inline]
1384#[target_feature(enable = "neon")]
1385#[cfg_attr(test, assert_instr(fcmeq))]
1386#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1387pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1388    unsafe { simd_eq(a, b) }
1389}
1390#[doc = "Floating-point compare equal"]
1391#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
1392#[inline]
1393#[target_feature(enable = "neon")]
1394#[cfg_attr(test, assert_instr(fcmeq))]
1395#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1396pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1397    unsafe { simd_eq(a, b) }
1398}
1399#[doc = "Compare bitwise Equal (vector)"]
1400#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
1401#[inline]
1402#[target_feature(enable = "neon")]
1403#[cfg_attr(test, assert_instr(cmeq))]
1404#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1405pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
1406    unsafe { simd_eq(a, b) }
1407}
1408#[doc = "Compare bitwise Equal (vector)"]
1409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
1410#[inline]
1411#[target_feature(enable = "neon")]
1412#[cfg_attr(test, assert_instr(cmeq))]
1413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1414pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
1415    unsafe { simd_eq(a, b) }
1416}
1417#[doc = "Compare bitwise Equal (vector)"]
1418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
1419#[inline]
1420#[target_feature(enable = "neon")]
1421#[cfg_attr(test, assert_instr(cmeq))]
1422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1423pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
1424    unsafe { simd_eq(a, b) }
1425}
1426#[doc = "Compare bitwise Equal (vector)"]
1427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
1428#[inline]
1429#[target_feature(enable = "neon")]
1430#[cfg_attr(test, assert_instr(cmeq))]
1431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1432pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
1433    unsafe { simd_eq(a, b) }
1434}
1435#[doc = "Compare bitwise Equal (vector)"]
1436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
1437#[inline]
1438#[target_feature(enable = "neon")]
1439#[cfg_attr(test, assert_instr(cmeq))]
1440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1441pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
1442    unsafe { simd_eq(a, b) }
1443}
1444#[doc = "Compare bitwise Equal (vector)"]
1445#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
1446#[inline]
1447#[target_feature(enable = "neon")]
1448#[cfg_attr(test, assert_instr(cmeq))]
1449#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1450pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
1451    unsafe { simd_eq(a, b) }
1452}
// --- scalar equality comparisons ---
// The float forms splat the scalars into one-lane vectors (`vdup_n_*`), run the
// vector compare, and extract lane 0; the i64/u64 forms reinterpret the scalar
// as a one-lane vector via `transmute` in both directions. Result is all-ones
// (`u64::MAX` / `u32::MAX` / `u16::MAX`) on equality, 0 otherwise.
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
// f16 variant additionally requires the `fp16` feature and is excluded on
// arm64ec; note it is still gated `unstable` unlike the forms above.
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
// --- floating-point compare-equal-to-zero (vceqz*) ---
// Each form materializes an explicit all-zeros vector and compares against it
// with `simd_eq`; lanes equal to zero become all-ones in the result.
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a scalar 0.0 is transmuted directly to float64x1_t.
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
// --- signed/poly compare-equal-to-zero (vceqz*) ---
// `cmeq` against an explicit zero vector; zero lanes become all-ones.
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
// NOTE(review): the generated `#[doc]` text below says "Signed" although the
// operands are polynomial types; the comparison itself is purely bitwise, so
// behavior is correct — only the summary wording is inherited from the signed
// spec entry.
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
// --- unsigned compare-equal-to-zero (vceqz*) ---
// Same `cmeq`-against-zero pattern as the signed forms, with unsigned
// zero-vector constants.
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
// --- scalar compare-equal-to-zero ---
// Integer forms reinterpret the scalar as a one-lane vector and back via
// `transmute`; float forms splat with `vdup_n_*` and extract lane 0.
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
// --- 64-bit lane-wise greater-than-or-equal comparisons ---
// Lowered via `simd_ge`; satisfied lanes are set to all-ones. Expected
// instructions: `fcmge` (float), `cmge` (signed), `cmhs` (unsigned
// "higher-or-same").
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
// --- scalar greater-than-or-equal comparisons ---
// Same splat/extract (float) and transmute (integer) wrapping pattern as the
// scalar vceq* forms above; result is all-ones when `a >= b`, 0 otherwise.
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
// --- floating-point compare greater-than-or-equal-to-zero (vcgez*) ---
// `fcmge` against an explicit zero vector via `simd_ge`.
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a scalar 0.0 is transmuted directly to float64x1_t.
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
// --- signed compare greater-than-or-equal-to-zero (vcgez*) ---
// `cmge` against an explicit zero vector; non-negative lanes become all-ones.
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
// --- scalar compare greater-than-or-equal-to-zero ---
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
// NOTE(review): unlike the other scalar compares, the generator asserts `nop`
// here — it does not expect a dedicated compare instruction for signed >= 0.
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
// --- 64-bit lane-wise greater-than comparisons ---
// Lowered via `simd_gt`; satisfied lanes are set to all-ones. Expected
// instructions: `fcmgt` (float), `cmgt` (signed), `cmhi` (unsigned "higher").
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: widen both operands to 1-lane vectors, compare,
    // extract lane 0 (all-ones on true, 0 on false).
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    // f32 scalar variant of vcgtd_f64.
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    // Reinterpret the scalars as 1-lane vectors (same 8-byte layout),
    // compare, then reinterpret the lane mask back to u64.
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    // Unsigned variant of vcgtd_s64, same transmute round-trip.
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    // f16 scalar variant: vector compare then extract lane 0.
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane against zero; `transmute` reinterprets the internal
    // `f32x2` zero constant as the public `float32x2_t` type (same layout).
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    // 4-lane variant of vcgtz_f32.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a scalar f64 zero transmutes directly to float64x1_t
    // (both are 8 bytes).
    let b: f64 = 0.0;
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    // 2-lane f64 variant.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise `a > 0`; mask lanes are all-ones/all-zeros.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    // 16-lane variant of vcgtz_s8.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    // 8-lane i16 variant.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    // 4-lane i32 variant.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    // 1-lane i64 variant.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    // 2-lane i64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    // Scalar form: widen to a 1-lane vector, compare against zero,
    // extract lane 0 (all-ones on true, 0 on false).
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    // f32 scalar variant of vcgtzd_f64.
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    // Reinterpret the scalar as a 1-lane vector, compare against zero,
    // reinterpret the lane mask back to u64.
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgtzh_f16(a: f16) -> u16 {
    // f16 scalar variant: vector compare-with-zero then extract lane 0.
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise `a <= b`; assert_instr expects FCMGE because the compiler
    // emits `b >= a` with operands swapped (presumably — TODO confirm against codegen).
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 2-lane variant of vcle_f64.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise `a <= b`; mask lanes are all-ones/all-zeros.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 2-lane variant of vcle_s64.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise `a <= b` (CMHS, unsigned higher-or-same).
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 2-lane variant of vcle_u64.
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    // Scalar form: widen both operands to 1-lane vectors, compare,
    // extract lane 0 (all-ones on true, 0 on false).
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    // f32 scalar variant of vcled_f64.
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    // Reinterpret the scalars as 1-lane vectors, compare, then
    // reinterpret the lane mask back to u64.
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    // Signed variant of vcled_u64, same transmute round-trip.
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    // f16 scalar variant: vector compare then extract lane 0.
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane against zero; `transmute` reinterprets the internal
    // `f32x2` zero constant as the public `float32x2_t` type (same layout).
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    // 4-lane variant of vclez_f32.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a scalar f64 zero transmutes directly to float64x1_t
    // (both are 8 bytes).
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    // 2-lane f64 variant.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise `a <= 0`; mask lanes are all-ones/all-zeros.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    // 16-lane variant of vclez_s8.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    // 8-lane i16 variant.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    // 4-lane i32 variant.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    // 1-lane i64 variant.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    // 2-lane i64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    // Scalar form: widen to a 1-lane vector, compare against zero,
    // extract lane 0 (all-ones on true, 0 on false).
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    // f32 scalar variant of vclezd_f64.
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    // Reinterpret the scalar as a 1-lane vector, compare against zero,
    // reinterpret the lane mask back to u64.
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclezh_f16(a: f16) -> u16 {
    // f16 scalar variant: vector compare-with-zero then extract lane 0.
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise `a < b`; assert_instr expects FCMGT because the compiler
    // emits `b > a` with operands swapped (presumably — TODO confirm against codegen).
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 2-lane variant of vclt_f64.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise `a < b`; mask lanes are all-ones/all-zeros.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 2-lane variant of vclt_s64.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise `a < b` (CMHI, unsigned-higher, with swapped operands).
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 2-lane variant of vclt_u64.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_u64(a: u64, b: u64) -> u64 {
    // Reinterpret the scalars as 1-lane vectors, compare, then
    // reinterpret the lane mask back to u64.
    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    // Signed variant of vcltd_u64, same transmute round-trip.
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    // f16 scalar form: widen both operands, compare, extract lane 0.
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    // f32 scalar variant: vector compare then extract lane 0
    // (all-ones on true, 0 on false).
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    // f64 scalar variant of vclts_f32.
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
2678#[doc = "Floating-point compare less than zero"]
2679#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
2680#[inline]
2681#[target_feature(enable = "neon")]
2682#[cfg_attr(test, assert_instr(fcmlt))]
2683#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2684pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
2685    let b: f32x2 = f32x2::new(0.0, 0.0);
2686    unsafe { simd_lt(a, transmute(b)) }
2687}
2688#[doc = "Floating-point compare less than zero"]
2689#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
2690#[inline]
2691#[target_feature(enable = "neon")]
2692#[cfg_attr(test, assert_instr(fcmlt))]
2693#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2694pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
2695    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
2696    unsafe { simd_lt(a, transmute(b)) }
2697}
2698#[doc = "Floating-point compare less than zero"]
2699#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
2700#[inline]
2701#[target_feature(enable = "neon")]
2702#[cfg_attr(test, assert_instr(fcmlt))]
2703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2704pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
2705    let b: f64 = 0.0;
2706    unsafe { simd_lt(a, transmute(b)) }
2707}
2708#[doc = "Floating-point compare less than zero"]
2709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
2710#[inline]
2711#[target_feature(enable = "neon")]
2712#[cfg_attr(test, assert_instr(fcmlt))]
2713#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2714pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
2715    let b: f64x2 = f64x2::new(0.0, 0.0);
2716    unsafe { simd_lt(a, transmute(b)) }
2717}
2718#[doc = "Compare signed less than zero"]
2719#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
2720#[inline]
2721#[target_feature(enable = "neon")]
2722#[cfg_attr(test, assert_instr(cmlt))]
2723#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2724pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
2725    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2726    unsafe { simd_lt(a, transmute(b)) }
2727}
2728#[doc = "Compare signed less than zero"]
2729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
2730#[inline]
2731#[target_feature(enable = "neon")]
2732#[cfg_attr(test, assert_instr(cmlt))]
2733#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2734pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
2735    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2736    unsafe { simd_lt(a, transmute(b)) }
2737}
2738#[doc = "Compare signed less than zero"]
2739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
2740#[inline]
2741#[target_feature(enable = "neon")]
2742#[cfg_attr(test, assert_instr(cmlt))]
2743#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2744pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
2745    let b: i16x4 = i16x4::new(0, 0, 0, 0);
2746    unsafe { simd_lt(a, transmute(b)) }
2747}
2748#[doc = "Compare signed less than zero"]
2749#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
2750#[inline]
2751#[target_feature(enable = "neon")]
2752#[cfg_attr(test, assert_instr(cmlt))]
2753#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2754pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
2755    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2756    unsafe { simd_lt(a, transmute(b)) }
2757}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    // Lane-wise `a < 0` against an all-zeros vector.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    // Lane-wise `a < 0` against an all-zeros vector (128-bit form).
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    // Single-lane `a < 0` against zero.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    // Lane-wise `a < 0` against an all-zeros vector (128-bit form).
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    // Scalar form: duplicate into a 1-lane vector, compare, extract lane 0.
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    // Scalar form: duplicate into a vector, compare, extract lane 0.
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    // Reinterpret the scalar as a 1-lane vector, compare, reinterpret back.
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcltzh_f16(a: f16) -> u16 {
    // Scalar f16 form: duplicate into a vector, compare, extract lane 0.
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 0) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 0) intrinsic, 128-bit form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 0) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 0) intrinsic, 128-bit form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 0) intrinsic, f64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE selects one of the 2 complex pairs in `c` (hence the 1-bit bound).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair (lanes 2*LANE, 2*LANE+1)
        // across all pairs of `c`, then defer to the rotation-0 form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE selects one of the 2 complex pairs in the 64-bit `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair across all 4 pairs of a
        // 128-bit vector, then defer to the rotation-0 form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds a single complex pair, so LANE can only be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the selected pair (identity here), then defer to rot0.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // A float32x2_t holds a single complex pair, so LANE can only be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast that pair across both pairs of a 128-bit vector,
        // then defer to the rotation-0 form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // LANE selects one of the 4 complex pairs in the 128-bit `c` (2-bit bound).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow to 64 bits by broadcasting the selected pair, then defer to rot0.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE selects one of the 4 complex pairs in `c` (2-bit bound).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair across all 4 pairs, then defer to rot0.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE selects one of the 2 complex pairs in the 128-bit `c` (1-bit bound).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow to 64 bits by extracting the selected pair, then defer to rot0.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE selects one of the 2 complex pairs in `c` (1-bit bound).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair across both pairs, then defer to rot0.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 180) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 180) intrinsic, 128-bit form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 180) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 180) intrinsic, 128-bit form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 180) intrinsic, f64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE selects one of the 2 complex pairs in `c` (1-bit bound).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair (lanes 2*LANE, 2*LANE+1)
        // across all pairs of `c`, then defer to the rotation-180 form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE selects one of the 2 complex pairs in the 64-bit `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair across all 4 pairs of a 128-bit
        // vector, then defer to the rotation-180 form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds a single complex pair, so LANE can only be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the selected pair (identity here), then defer to rot180.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // A float32x2_t holds a single complex pair, so LANE can only be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast that pair across both pairs of a 128-bit vector,
        // then defer to the rotation-180 form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // LANE selects one of the 4 complex pairs in the 128-bit `c` (2-bit bound).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow to 64 bits by broadcasting the selected pair, then defer to rot180.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE selects one of the 4 complex pairs in `c` (2-bit bound).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair across all 4 pairs, then defer to rot180.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE selects one of the 2 complex pairs in the 128-bit `c` (1-bit bound).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow to 64 bits by extracting the selected pair, then defer to rot180.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE selects one of the 2 complex pairs in `c` (1-bit bound).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair across both pairs, then defer to rot180.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 270) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 270) intrinsic, 128-bit form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 270) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 270) intrinsic, 128-bit form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Direct FFI binding to the LLVM FCMLA (rotation 270) intrinsic, f64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE selects one of the 2 complex pairs in `c` (1-bit bound).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair (lanes 2*LANE, 2*LANE+1)
        // across all pairs of `c`, then defer to the rotation-270 form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds two complex (re, im) pairs, so the pair index
    // LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into all four pair
        // positions of a 128-bit vector, then reuse the whole-vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds exactly one complex (re, im) pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 this shuffle is the identity; it keeps the
        // generated code shape uniform with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one complex (re, im) pair, so only
    // LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the single complex pair of `c` into both pair positions
        // of a 128-bit vector, then reuse the whole-vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds four complex (re, im) pairs, so the pair index
    // LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected complex pair of `c` into both pair
        // positions of a 64-bit vector, then reuse the whole-vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // The 128-bit `c` holds four complex (re, im) pairs, so the pair index
    // LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected complex pair of `c` into all four pair
        // positions, then reuse the whole-vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds two complex (re, im) pairs, so the pair index
    // LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected complex pair of `c` (elements 2*LANE and
        // 2*LANE+1) into a 64-bit vector, then reuse the whole-vector form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds two complex (re, im) pairs, so the pair index LANE must fit
    // in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into both pair
        // positions, then reuse the whole-vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM FCMLA (90-degree rotation) intrinsic for
    // a 64-bit vector of 4 x f16 (two complex (re, im) pairs).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: availability of the `fcma` and `fp16` features is guaranteed
    // by the #[target_feature] gates on this function.
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM FCMLA (90-degree rotation) intrinsic for
    // a 128-bit vector of 8 x f16 (four complex (re, im) pairs).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: availability of the `fcma` and `fp16` features is guaranteed
    // by the #[target_feature] gates on this function.
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM FCMLA (90-degree rotation) intrinsic for
    // a 64-bit vector of 2 x f32 (one complex (re, im) pair).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: availability of the `fcma` feature is guaranteed by the
    // #[target_feature] gate on this function.
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM FCMLA (90-degree rotation) intrinsic for
    // a 128-bit vector of 4 x f32 (two complex (re, im) pairs).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: availability of the `fcma` feature is guaranteed by the
    // #[target_feature] gate on this function.
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM FCMLA (90-degree rotation) intrinsic for
    // a 128-bit vector of 2 x f64 (one complex (re, im) pair).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: availability of the `fcma` feature is guaranteed by the
    // #[target_feature] gate on this function.
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds two complex (re, im) pairs, so the pair index LANE must fit
    // in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` (elements 2*LANE and
        // 2*LANE+1) to every pair position, then reuse the whole-vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds two complex (re, im) pairs, so the pair index
    // LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into all four pair
        // positions of a 128-bit vector, then reuse the whole-vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds exactly one complex (re, im) pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 this shuffle is the identity; it keeps the
        // generated code shape uniform with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one complex (re, im) pair, so only
    // LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the single complex pair of `c` into both pair positions
        // of a 128-bit vector, then reuse the whole-vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds four complex (re, im) pairs, so the pair index
    // LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected complex pair of `c` into both pair
        // positions of a 64-bit vector, then reuse the whole-vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds four complex (re, im) pairs, so the pair index LANE must fit
    // in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected complex pair of `c` into all four pair
        // positions, then reuse the whole-vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds two complex (re, im) pairs, so the pair index
    // LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected complex pair of `c` (elements 2*LANE and
        // 2*LANE+1) into a 64-bit vector, then reuse the whole-vector form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds two complex (re, im) pairs, so the pair index LANE must fit
    // in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into both pair
        // positions, then reuse the whole-vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. In the
        // shuffle's concatenated input, indices >= 2 select from `b`, so
        // `2 + LANE2` picks the source element; the match dispatches on the
        // destination lane because shuffle indices must be constants.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to one bit above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 8 select from `b`, so `8 + LANE2` picks the source
        // element; the match covers every destination lane because shuffle
        // indices must be constants.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 4 select from `b`, so `4 + LANE2` picks the source
        // element; the match dispatches on the destination lane.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 2 select from `b`, so `2 + LANE2` picks the source
        // element; the match dispatches on the destination lane.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to one bit above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 8 select from `b`, so `8 + LANE2` picks the source
        // element; the match covers every destination lane.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 4 select from `b`, so `4 + LANE2` picks the source
        // element; the match dispatches on the destination lane.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 2 select from `b`, so `2 + LANE2` picks the source
        // element; the match dispatches on the destination lane.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to one bit above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 8 select from `b`, so `8 + LANE2` picks the source
        // element; the match covers every destination lane.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Returns `a` with element LANE1 replaced by b[LANE2]. Shuffle
        // indices >= 4 select from `b`, so `4 + LANE2` picks the source
        // element; the match dispatches on the destination lane.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to `b`'s length (lanes 2..4 repeat a[0..2]) so one shuffle
    // can index both operands; `b`'s lanes then sit at indices 4..8 of the
    // concatenated input.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Returns the low two lanes of `a` with element LANE1 replaced by
        // b[LANE2] (index 4 + LANE2 in the concatenation).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to one bit above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to `b`'s length (lanes 8..16 repeat a[0..8]) so one shuffle
    // can index both operands; `b`'s lanes then sit at indices 16..32.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Returns the low eight lanes of `a` with element LANE1 replaced by
        // b[LANE2] (index 16 + LANE2 in the concatenation).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to `b`'s length (lanes 4..8 repeat a[0..4]) so one shuffle
    // can index both operands; `b`'s lanes then sit at indices 8..16.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Returns the low four lanes of `a` with element LANE1 replaced by
        // b[LANE2] (index 8 + LANE2 in the concatenation).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` (2 lanes) to 4 lanes by concatenating it with itself so both
    // shuffle operands have matching lane counts; only the original low 2
    // lanes of `a` are ever selected below.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 8-lane concatenation of `a` and `b` (indices 4.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which destination lane receives `b[LANE2]`; the mask
        // is redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` (8 lanes) to 16 lanes by concatenating it with itself so both
    // shuffle operands have matching lane counts; only the original low 8
    // lanes of `a` are ever selected below.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 32-lane concatenation of `a` and `b` (indices 16.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which destination lane receives `b[LANE2]`; the mask
        // is redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` (4 lanes) to 8 lanes by concatenating it with itself so both
    // shuffle operands have matching lane counts; only the original low 4
    // lanes of `a` are ever selected below.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 16-lane concatenation of `a` and `b` (indices 8.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which destination lane receives `b[LANE2]`; the mask
        // is redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` (2 lanes) to 4 lanes by concatenating it with itself so both
    // shuffle operands have matching lane counts; only the original low 2
    // lanes of `a` are ever selected below.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 8-lane concatenation of `a` and `b` (indices 4.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which destination lane receives `b[LANE2]`; the mask
        // is redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` (8 lanes) to 16 lanes by concatenating it with itself so both
    // shuffle operands have matching lane counts; only the original low 8
    // lanes of `a` are ever selected below.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 32-lane concatenation of `a` and `b` (indices 16.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which destination lane receives `b[LANE2]`; the mask
        // is redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` (4 lanes) to 8 lanes by concatenating it with itself so both
    // shuffle operands have matching lane counts; only the original low 4
    // lanes of `a` are ever selected below.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 16-lane concatenation of `a` and `b` (indices 8.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which destination lane receives `b[LANE2]`; the mask
        // is redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen the 2-lane source `b` to 4 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // low 2 lanes are ever selected below (LANE2 is at most 1).
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 8-lane concatenation of `a` and `b` (indices 4.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which lane of `a` receives `b[LANE2]`; the mask is
        // redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source vector has a single lane, so the only valid LANE2 is 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // lane 0 is ever selected below.
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 4-lane concatenation of `a` and `b` (indices 2.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which lane of `a` receives `b[LANE2]`; the mask is
        // redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source vector has a single lane, so the only valid LANE2 is 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // lane 0 is ever selected below.
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 4-lane concatenation of `a` and `b` (indices 2.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which lane of `a` receives `b[LANE2]`; the mask is
        // redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source vector has a single lane, so the only valid LANE2 is 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // lane 0 is ever selected below.
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 4-lane concatenation of `a` and `b` (indices 2.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which lane of `a` receives `b[LANE2]`; the mask is
        // redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source vector has a single lane, so the only valid LANE2 is 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // lane 0 is ever selected below.
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 4-lane concatenation of `a` and `b` (indices 2.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which lane of `a` receives `b[LANE2]`; the mask is
        // redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen the 8-lane source `b` to 16 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // low 8 lanes are ever selected below (LANE2 is at most 7).
    let b: int8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 32-lane concatenation of `a` and `b` (indices 16.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which of the 16 lanes of `a` receives `b[LANE2]`; one
        // match arm per destination lane, identical apart from the position of
        // the `16 + LANE2` index. The mask is redundant after the static
        // assert but keeps every arm provably in range.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen the 4-lane source `b` to 8 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // low 4 lanes are ever selected below (LANE2 is at most 3).
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 16-lane concatenation of `a` and `b` (indices 8.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which lane of `a` receives `b[LANE2]`; the mask is
        // redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen the 2-lane source `b` to 4 lanes by concatenating it with itself
    // so both shuffle operands have matching lane counts; only its original
    // low 2 lanes are ever selected below (LANE2 is at most 1).
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    // SAFETY: every shuffle index is an in-range compile-time constant over
    // the 8-lane concatenation of `a` and `b` (indices 4.. select from `b`),
    // and the mask bounds LANE1 so the wildcard arm is unreachable.
    unsafe {
        // LANE1 selects which lane of `a` receives `b[LANE2]`; the mask is
        // redundant after the static assert but keeps every arm provably in
        // range.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
4981#[doc = "Insert vector element from another vector element"]
4982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
4983#[inline]
4984#[target_feature(enable = "neon")]
4985#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4986#[rustc_legacy_const_generics(1, 3)]
4987#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4988pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
4989    a: uint8x16_t,
4990    b: uint8x8_t,
4991) -> uint8x16_t {
4992    static_assert_uimm_bits!(LANE1, 4);
4993    static_assert_uimm_bits!(LANE2, 3);
4994    let b: uint8x16_t =
4995        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4996    unsafe {
4997        match LANE1 & 0b1111 {
4998            0 => simd_shuffle!(
4999                a,
5000                b,
5001                [
5002                    16 + LANE2 as u32,
5003                    1,
5004                    2,
5005                    3,
5006                    4,
5007                    5,
5008                    6,
5009                    7,
5010                    8,
5011                    9,
5012                    10,
5013                    11,
5014                    12,
5015                    13,
5016                    14,
5017                    15
5018                ]
5019            ),
5020            1 => simd_shuffle!(
5021                a,
5022                b,
5023                [
5024                    0,
5025                    16 + LANE2 as u32,
5026                    2,
5027                    3,
5028                    4,
5029                    5,
5030                    6,
5031                    7,
5032                    8,
5033                    9,
5034                    10,
5035                    11,
5036                    12,
5037                    13,
5038                    14,
5039                    15
5040                ]
5041            ),
5042            2 => simd_shuffle!(
5043                a,
5044                b,
5045                [
5046                    0,
5047                    1,
5048                    16 + LANE2 as u32,
5049                    3,
5050                    4,
5051                    5,
5052                    6,
5053                    7,
5054                    8,
5055                    9,
5056                    10,
5057                    11,
5058                    12,
5059                    13,
5060                    14,
5061                    15
5062                ]
5063            ),
5064            3 => simd_shuffle!(
5065                a,
5066                b,
5067                [
5068                    0,
5069                    1,
5070                    2,
5071                    16 + LANE2 as u32,
5072                    4,
5073                    5,
5074                    6,
5075                    7,
5076                    8,
5077                    9,
5078                    10,
5079                    11,
5080                    12,
5081                    13,
5082                    14,
5083                    15
5084                ]
5085            ),
5086            4 => simd_shuffle!(
5087                a,
5088                b,
5089                [
5090                    0,
5091                    1,
5092                    2,
5093                    3,
5094                    16 + LANE2 as u32,
5095                    5,
5096                    6,
5097                    7,
5098                    8,
5099                    9,
5100                    10,
5101                    11,
5102                    12,
5103                    13,
5104                    14,
5105                    15
5106                ]
5107            ),
5108            5 => simd_shuffle!(
5109                a,
5110                b,
5111                [
5112                    0,
5113                    1,
5114                    2,
5115                    3,
5116                    4,
5117                    16 + LANE2 as u32,
5118                    6,
5119                    7,
5120                    8,
5121                    9,
5122                    10,
5123                    11,
5124                    12,
5125                    13,
5126                    14,
5127                    15
5128                ]
5129            ),
5130            6 => simd_shuffle!(
5131                a,
5132                b,
5133                [
5134                    0,
5135                    1,
5136                    2,
5137                    3,
5138                    4,
5139                    5,
5140                    16 + LANE2 as u32,
5141                    7,
5142                    8,
5143                    9,
5144                    10,
5145                    11,
5146                    12,
5147                    13,
5148                    14,
5149                    15
5150                ]
5151            ),
5152            7 => simd_shuffle!(
5153                a,
5154                b,
5155                [
5156                    0,
5157                    1,
5158                    2,
5159                    3,
5160                    4,
5161                    5,
5162                    6,
5163                    16 + LANE2 as u32,
5164                    8,
5165                    9,
5166                    10,
5167                    11,
5168                    12,
5169                    13,
5170                    14,
5171                    15
5172                ]
5173            ),
5174            8 => simd_shuffle!(
5175                a,
5176                b,
5177                [
5178                    0,
5179                    1,
5180                    2,
5181                    3,
5182                    4,
5183                    5,
5184                    6,
5185                    7,
5186                    16 + LANE2 as u32,
5187                    9,
5188                    10,
5189                    11,
5190                    12,
5191                    13,
5192                    14,
5193                    15
5194                ]
5195            ),
5196            9 => simd_shuffle!(
5197                a,
5198                b,
5199                [
5200                    0,
5201                    1,
5202                    2,
5203                    3,
5204                    4,
5205                    5,
5206                    6,
5207                    7,
5208                    8,
5209                    16 + LANE2 as u32,
5210                    10,
5211                    11,
5212                    12,
5213                    13,
5214                    14,
5215                    15
5216                ]
5217            ),
5218            10 => simd_shuffle!(
5219                a,
5220                b,
5221                [
5222                    0,
5223                    1,
5224                    2,
5225                    3,
5226                    4,
5227                    5,
5228                    6,
5229                    7,
5230                    8,
5231                    9,
5232                    16 + LANE2 as u32,
5233                    11,
5234                    12,
5235                    13,
5236                    14,
5237                    15
5238                ]
5239            ),
5240            11 => simd_shuffle!(
5241                a,
5242                b,
5243                [
5244                    0,
5245                    1,
5246                    2,
5247                    3,
5248                    4,
5249                    5,
5250                    6,
5251                    7,
5252                    8,
5253                    9,
5254                    10,
5255                    16 + LANE2 as u32,
5256                    12,
5257                    13,
5258                    14,
5259                    15
5260                ]
5261            ),
5262            12 => simd_shuffle!(
5263                a,
5264                b,
5265                [
5266                    0,
5267                    1,
5268                    2,
5269                    3,
5270                    4,
5271                    5,
5272                    6,
5273                    7,
5274                    8,
5275                    9,
5276                    10,
5277                    11,
5278                    16 + LANE2 as u32,
5279                    13,
5280                    14,
5281                    15
5282                ]
5283            ),
5284            13 => simd_shuffle!(
5285                a,
5286                b,
5287                [
5288                    0,
5289                    1,
5290                    2,
5291                    3,
5292                    4,
5293                    5,
5294                    6,
5295                    7,
5296                    8,
5297                    9,
5298                    10,
5299                    11,
5300                    12,
5301                    16 + LANE2 as u32,
5302                    14,
5303                    15
5304                ]
5305            ),
5306            14 => simd_shuffle!(
5307                a,
5308                b,
5309                [
5310                    0,
5311                    1,
5312                    2,
5313                    3,
5314                    4,
5315                    5,
5316                    6,
5317                    7,
5318                    8,
5319                    9,
5320                    10,
5321                    11,
5322                    12,
5323                    13,
5324                    16 + LANE2 as u32,
5325                    15
5326                ]
5327            ),
5328            15 => simd_shuffle!(
5329                a,
5330                b,
5331                [
5332                    0,
5333                    1,
5334                    2,
5335                    3,
5336                    4,
5337                    5,
5338                    6,
5339                    7,
5340                    8,
5341                    9,
5342                    10,
5343                    11,
5344                    12,
5345                    13,
5346                    14,
5347                    16 + LANE2 as u32
5348                ]
5349            ),
5350            _ => unreachable_unchecked(),
5351        }
5352    }
5353}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..8); LANE2 selects the
    // source lane of `b` (0..4). Both are checked at compile time.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes (its 4 lanes repeated) so both shuffle inputs have
    // the same lane count; only concatenation indices 8 + (0..4) are ever
    // selected below.
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own arm: indices 0..8 keep lanes of `a`, and `8 + LANE2`
        // inserts the chosen lane of (widened) `b` at position LANE1.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so 0..=7 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..4); LANE2 selects the
    // source lane of `b` (0..2). Both are checked at compile time.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes (its 2 lanes repeated) so both shuffle inputs have
    // the same lane count; only concatenation indices 4 + (0..2) are ever
    // selected below.
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own arm: indices 0..4 keep lanes of `a`, and `4 + LANE2`
        // inserts the chosen lane of (widened) `b` at position LANE1.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 is masked to 2 bits, so 0..=3 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x8_t,
) -> poly8x16_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..16); LANE2 selects the
    // source lane of `b` (0..8). Both are checked at compile time.
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes (its 8 lanes repeated) so both shuffle inputs
    // have the same lane count; only concatenation indices 16 + (0..8) are
    // ever selected below.
    let b: poly8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own fully-unrolled arm: arm k keeps indices 0..16 of `a`
        // except position k, where `16 + LANE2` inserts the chosen lane of
        // (widened) `b`.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            // LANE1 is masked to 4 bits, so 0..=15 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..8); LANE2 selects the
    // source lane of `b` (0..4). Both are checked at compile time.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes (its 4 lanes repeated) so both shuffle inputs have
    // the same lane count; only concatenation indices 8 + (0..4) are ever
    // selected below.
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own arm: indices 0..8 keep lanes of `a`, and `8 + LANE2`
        // inserts the chosen lane of (widened) `b` at position LANE1.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so 0..=7 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..4); LANE2 selects the
    // source lane of `b` (0..4). Both are checked at compile time. `a` and
    // `b` already have equal lane counts, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own arm: indices 0..4 keep lanes of `a`, and `4 + LANE2`
        // inserts the chosen lane of `b` at position LANE1.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 is masked to 2 bits, so 0..=3 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..2); LANE2 selects the
    // source lane of `b` (0..2). Both are checked at compile time. `a` and
    // `b` already have equal lane counts, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own arm: index 0/1 keeps a lane of `a`, and `2 + LANE2`
        // inserts the chosen lane of `b` at position LANE1.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit, so 0..=1 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
    a: int8x16_t,
    b: int8x16_t,
) -> int8x16_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..16); LANE2 selects the
    // source lane of `b` (0..16). Both are checked at compile time. `a` and
    // `b` already have equal lane counts, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own fully-unrolled arm: arm k keeps indices 0..16 of `a`
        // except position k, where `16 + LANE2` inserts the chosen lane of
        // `b`.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            // LANE1 is masked to 4 bits, so 0..=15 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    // NOTE(review): generated file — change the stdarch-gen-arm spec rather
    // than editing here.
    // LANE1 selects the destination lane of `a` (0..8); LANE2 selects the
    // source lane of `b` (0..8). Both are checked at compile time. `a` and
    // `b` already have equal lane counts, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // `simd_shuffle!` needs const index arrays, so each LANE1 value gets
        // its own arm: indices 0..8 keep lanes of `a`, and `8 + LANE2`
        // inserts the chosen lane of `b` at position LANE1.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so 0..=7 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns `a` with lane LANE1 replaced by lane LANE2 of `b`. Expressed as a
// 4-lane shuffle over the concatenation of `a` (indices 0..4) and `b`
// (indices 4..8); one match arm per LANE1 value because `simd_shuffle!`
// needs a compile-time-constant index array.
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // SAFETY: LANE1 is compile-time restricted to 2 bits, so the masked match
    // covers every possible value and the `_` arm is unreachable; all shuffle
    // indices lie in 0..8.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns `a` with lane LANE1 replaced by lane LANE2 of `b`. Expressed as a
// 2-lane shuffle over the concatenation of `a` (indices 0..2) and `b`
// (indices 2..4).
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // SAFETY: LANE1 is compile-time restricted to 1 bit, so the masked match
    // covers both possible values and the `_` arm is unreachable; all shuffle
    // indices lie in 0..4.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
6293#[doc = "Insert vector element from another vector element"]
6294#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6295#[inline]
6296#[target_feature(enable = "neon")]
6297#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6298#[rustc_legacy_const_generics(1, 3)]
6299#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6300pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6301    a: uint8x16_t,
6302    b: uint8x16_t,
6303) -> uint8x16_t {
6304    static_assert_uimm_bits!(LANE1, 4);
6305    static_assert_uimm_bits!(LANE2, 4);
6306    unsafe {
6307        match LANE1 & 0b1111 {
6308            0 => simd_shuffle!(
6309                a,
6310                b,
6311                [
6312                    16 + LANE2 as u32,
6313                    1,
6314                    2,
6315                    3,
6316                    4,
6317                    5,
6318                    6,
6319                    7,
6320                    8,
6321                    9,
6322                    10,
6323                    11,
6324                    12,
6325                    13,
6326                    14,
6327                    15
6328                ]
6329            ),
6330            1 => simd_shuffle!(
6331                a,
6332                b,
6333                [
6334                    0,
6335                    16 + LANE2 as u32,
6336                    2,
6337                    3,
6338                    4,
6339                    5,
6340                    6,
6341                    7,
6342                    8,
6343                    9,
6344                    10,
6345                    11,
6346                    12,
6347                    13,
6348                    14,
6349                    15
6350                ]
6351            ),
6352            2 => simd_shuffle!(
6353                a,
6354                b,
6355                [
6356                    0,
6357                    1,
6358                    16 + LANE2 as u32,
6359                    3,
6360                    4,
6361                    5,
6362                    6,
6363                    7,
6364                    8,
6365                    9,
6366                    10,
6367                    11,
6368                    12,
6369                    13,
6370                    14,
6371                    15
6372                ]
6373            ),
6374            3 => simd_shuffle!(
6375                a,
6376                b,
6377                [
6378                    0,
6379                    1,
6380                    2,
6381                    16 + LANE2 as u32,
6382                    4,
6383                    5,
6384                    6,
6385                    7,
6386                    8,
6387                    9,
6388                    10,
6389                    11,
6390                    12,
6391                    13,
6392                    14,
6393                    15
6394                ]
6395            ),
6396            4 => simd_shuffle!(
6397                a,
6398                b,
6399                [
6400                    0,
6401                    1,
6402                    2,
6403                    3,
6404                    16 + LANE2 as u32,
6405                    5,
6406                    6,
6407                    7,
6408                    8,
6409                    9,
6410                    10,
6411                    11,
6412                    12,
6413                    13,
6414                    14,
6415                    15
6416                ]
6417            ),
6418            5 => simd_shuffle!(
6419                a,
6420                b,
6421                [
6422                    0,
6423                    1,
6424                    2,
6425                    3,
6426                    4,
6427                    16 + LANE2 as u32,
6428                    6,
6429                    7,
6430                    8,
6431                    9,
6432                    10,
6433                    11,
6434                    12,
6435                    13,
6436                    14,
6437                    15
6438                ]
6439            ),
6440            6 => simd_shuffle!(
6441                a,
6442                b,
6443                [
6444                    0,
6445                    1,
6446                    2,
6447                    3,
6448                    4,
6449                    5,
6450                    16 + LANE2 as u32,
6451                    7,
6452                    8,
6453                    9,
6454                    10,
6455                    11,
6456                    12,
6457                    13,
6458                    14,
6459                    15
6460                ]
6461            ),
6462            7 => simd_shuffle!(
6463                a,
6464                b,
6465                [
6466                    0,
6467                    1,
6468                    2,
6469                    3,
6470                    4,
6471                    5,
6472                    6,
6473                    16 + LANE2 as u32,
6474                    8,
6475                    9,
6476                    10,
6477                    11,
6478                    12,
6479                    13,
6480                    14,
6481                    15
6482                ]
6483            ),
6484            8 => simd_shuffle!(
6485                a,
6486                b,
6487                [
6488                    0,
6489                    1,
6490                    2,
6491                    3,
6492                    4,
6493                    5,
6494                    6,
6495                    7,
6496                    16 + LANE2 as u32,
6497                    9,
6498                    10,
6499                    11,
6500                    12,
6501                    13,
6502                    14,
6503                    15
6504                ]
6505            ),
6506            9 => simd_shuffle!(
6507                a,
6508                b,
6509                [
6510                    0,
6511                    1,
6512                    2,
6513                    3,
6514                    4,
6515                    5,
6516                    6,
6517                    7,
6518                    8,
6519                    16 + LANE2 as u32,
6520                    10,
6521                    11,
6522                    12,
6523                    13,
6524                    14,
6525                    15
6526                ]
6527            ),
6528            10 => simd_shuffle!(
6529                a,
6530                b,
6531                [
6532                    0,
6533                    1,
6534                    2,
6535                    3,
6536                    4,
6537                    5,
6538                    6,
6539                    7,
6540                    8,
6541                    9,
6542                    16 + LANE2 as u32,
6543                    11,
6544                    12,
6545                    13,
6546                    14,
6547                    15
6548                ]
6549            ),
6550            11 => simd_shuffle!(
6551                a,
6552                b,
6553                [
6554                    0,
6555                    1,
6556                    2,
6557                    3,
6558                    4,
6559                    5,
6560                    6,
6561                    7,
6562                    8,
6563                    9,
6564                    10,
6565                    16 + LANE2 as u32,
6566                    12,
6567                    13,
6568                    14,
6569                    15
6570                ]
6571            ),
6572            12 => simd_shuffle!(
6573                a,
6574                b,
6575                [
6576                    0,
6577                    1,
6578                    2,
6579                    3,
6580                    4,
6581                    5,
6582                    6,
6583                    7,
6584                    8,
6585                    9,
6586                    10,
6587                    11,
6588                    16 + LANE2 as u32,
6589                    13,
6590                    14,
6591                    15
6592                ]
6593            ),
6594            13 => simd_shuffle!(
6595                a,
6596                b,
6597                [
6598                    0,
6599                    1,
6600                    2,
6601                    3,
6602                    4,
6603                    5,
6604                    6,
6605                    7,
6606                    8,
6607                    9,
6608                    10,
6609                    11,
6610                    12,
6611                    16 + LANE2 as u32,
6612                    14,
6613                    15
6614                ]
6615            ),
6616            14 => simd_shuffle!(
6617                a,
6618                b,
6619                [
6620                    0,
6621                    1,
6622                    2,
6623                    3,
6624                    4,
6625                    5,
6626                    6,
6627                    7,
6628                    8,
6629                    9,
6630                    10,
6631                    11,
6632                    12,
6633                    13,
6634                    16 + LANE2 as u32,
6635                    15
6636                ]
6637            ),
6638            15 => simd_shuffle!(
6639                a,
6640                b,
6641                [
6642                    0,
6643                    1,
6644                    2,
6645                    3,
6646                    4,
6647                    5,
6648                    6,
6649                    7,
6650                    8,
6651                    9,
6652                    10,
6653                    11,
6654                    12,
6655                    13,
6656                    14,
6657                    16 + LANE2 as u32
6658                ]
6659            ),
6660            _ => unreachable_unchecked(),
6661        }
6662    }
6663}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns `a` with lane LANE1 replaced by lane LANE2 of `b`. Expressed as an
// 8-lane shuffle over the concatenation of `a` (indices 0..8) and `b`
// (indices 8..16); one match arm per LANE1 value because `simd_shuffle!`
// needs a compile-time-constant index array.
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // SAFETY: LANE1 is compile-time restricted to 3 bits, so the masked match
    // covers every possible value and the `_` arm is unreachable; all shuffle
    // indices lie in 0..16.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns `a` with lane LANE1 replaced by lane LANE2 of `b`. Expressed as a
// 4-lane shuffle over the concatenation of `a` (indices 0..4) and `b`
// (indices 4..8); one match arm per LANE1 value because `simd_shuffle!`
// needs a compile-time-constant index array.
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // SAFETY: LANE1 is compile-time restricted to 2 bits, so the masked match
    // covers every possible value and the `_` arm is unreachable; all shuffle
    // indices lie in 0..8.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns `a` with lane LANE1 replaced by lane LANE2 of `b`. Expressed as a
// 2-lane shuffle over the concatenation of `a` (indices 0..2) and `b`
// (indices 2..4).
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // SAFETY: LANE1 is compile-time restricted to 1 bit, so the masked match
    // covers both possible values and the `_` arm is unreachable; all shuffle
    // indices lie in 0..4.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
6735#[doc = "Insert vector element from another vector element"]
6736#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6737#[inline]
6738#[target_feature(enable = "neon")]
6739#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6740#[rustc_legacy_const_generics(1, 3)]
6741#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6742pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6743    a: poly8x16_t,
6744    b: poly8x16_t,
6745) -> poly8x16_t {
6746    static_assert_uimm_bits!(LANE1, 4);
6747    static_assert_uimm_bits!(LANE2, 4);
6748    unsafe {
6749        match LANE1 & 0b1111 {
6750            0 => simd_shuffle!(
6751                a,
6752                b,
6753                [
6754                    16 + LANE2 as u32,
6755                    1,
6756                    2,
6757                    3,
6758                    4,
6759                    5,
6760                    6,
6761                    7,
6762                    8,
6763                    9,
6764                    10,
6765                    11,
6766                    12,
6767                    13,
6768                    14,
6769                    15
6770                ]
6771            ),
6772            1 => simd_shuffle!(
6773                a,
6774                b,
6775                [
6776                    0,
6777                    16 + LANE2 as u32,
6778                    2,
6779                    3,
6780                    4,
6781                    5,
6782                    6,
6783                    7,
6784                    8,
6785                    9,
6786                    10,
6787                    11,
6788                    12,
6789                    13,
6790                    14,
6791                    15
6792                ]
6793            ),
6794            2 => simd_shuffle!(
6795                a,
6796                b,
6797                [
6798                    0,
6799                    1,
6800                    16 + LANE2 as u32,
6801                    3,
6802                    4,
6803                    5,
6804                    6,
6805                    7,
6806                    8,
6807                    9,
6808                    10,
6809                    11,
6810                    12,
6811                    13,
6812                    14,
6813                    15
6814                ]
6815            ),
6816            3 => simd_shuffle!(
6817                a,
6818                b,
6819                [
6820                    0,
6821                    1,
6822                    2,
6823                    16 + LANE2 as u32,
6824                    4,
6825                    5,
6826                    6,
6827                    7,
6828                    8,
6829                    9,
6830                    10,
6831                    11,
6832                    12,
6833                    13,
6834                    14,
6835                    15
6836                ]
6837            ),
6838            4 => simd_shuffle!(
6839                a,
6840                b,
6841                [
6842                    0,
6843                    1,
6844                    2,
6845                    3,
6846                    16 + LANE2 as u32,
6847                    5,
6848                    6,
6849                    7,
6850                    8,
6851                    9,
6852                    10,
6853                    11,
6854                    12,
6855                    13,
6856                    14,
6857                    15
6858                ]
6859            ),
6860            5 => simd_shuffle!(
6861                a,
6862                b,
6863                [
6864                    0,
6865                    1,
6866                    2,
6867                    3,
6868                    4,
6869                    16 + LANE2 as u32,
6870                    6,
6871                    7,
6872                    8,
6873                    9,
6874                    10,
6875                    11,
6876                    12,
6877                    13,
6878                    14,
6879                    15
6880                ]
6881            ),
6882            6 => simd_shuffle!(
6883                a,
6884                b,
6885                [
6886                    0,
6887                    1,
6888                    2,
6889                    3,
6890                    4,
6891                    5,
6892                    16 + LANE2 as u32,
6893                    7,
6894                    8,
6895                    9,
6896                    10,
6897                    11,
6898                    12,
6899                    13,
6900                    14,
6901                    15
6902                ]
6903            ),
6904            7 => simd_shuffle!(
6905                a,
6906                b,
6907                [
6908                    0,
6909                    1,
6910                    2,
6911                    3,
6912                    4,
6913                    5,
6914                    6,
6915                    16 + LANE2 as u32,
6916                    8,
6917                    9,
6918                    10,
6919                    11,
6920                    12,
6921                    13,
6922                    14,
6923                    15
6924                ]
6925            ),
6926            8 => simd_shuffle!(
6927                a,
6928                b,
6929                [
6930                    0,
6931                    1,
6932                    2,
6933                    3,
6934                    4,
6935                    5,
6936                    6,
6937                    7,
6938                    16 + LANE2 as u32,
6939                    9,
6940                    10,
6941                    11,
6942                    12,
6943                    13,
6944                    14,
6945                    15
6946                ]
6947            ),
6948            9 => simd_shuffle!(
6949                a,
6950                b,
6951                [
6952                    0,
6953                    1,
6954                    2,
6955                    3,
6956                    4,
6957                    5,
6958                    6,
6959                    7,
6960                    8,
6961                    16 + LANE2 as u32,
6962                    10,
6963                    11,
6964                    12,
6965                    13,
6966                    14,
6967                    15
6968                ]
6969            ),
6970            10 => simd_shuffle!(
6971                a,
6972                b,
6973                [
6974                    0,
6975                    1,
6976                    2,
6977                    3,
6978                    4,
6979                    5,
6980                    6,
6981                    7,
6982                    8,
6983                    9,
6984                    16 + LANE2 as u32,
6985                    11,
6986                    12,
6987                    13,
6988                    14,
6989                    15
6990                ]
6991            ),
6992            11 => simd_shuffle!(
6993                a,
6994                b,
6995                [
6996                    0,
6997                    1,
6998                    2,
6999                    3,
7000                    4,
7001                    5,
7002                    6,
7003                    7,
7004                    8,
7005                    9,
7006                    10,
7007                    16 + LANE2 as u32,
7008                    12,
7009                    13,
7010                    14,
7011                    15
7012                ]
7013            ),
7014            12 => simd_shuffle!(
7015                a,
7016                b,
7017                [
7018                    0,
7019                    1,
7020                    2,
7021                    3,
7022                    4,
7023                    5,
7024                    6,
7025                    7,
7026                    8,
7027                    9,
7028                    10,
7029                    11,
7030                    16 + LANE2 as u32,
7031                    13,
7032                    14,
7033                    15
7034                ]
7035            ),
7036            13 => simd_shuffle!(
7037                a,
7038                b,
7039                [
7040                    0,
7041                    1,
7042                    2,
7043                    3,
7044                    4,
7045                    5,
7046                    6,
7047                    7,
7048                    8,
7049                    9,
7050                    10,
7051                    11,
7052                    12,
7053                    16 + LANE2 as u32,
7054                    14,
7055                    15
7056                ]
7057            ),
7058            14 => simd_shuffle!(
7059                a,
7060                b,
7061                [
7062                    0,
7063                    1,
7064                    2,
7065                    3,
7066                    4,
7067                    5,
7068                    6,
7069                    7,
7070                    8,
7071                    9,
7072                    10,
7073                    11,
7074                    12,
7075                    13,
7076                    16 + LANE2 as u32,
7077                    15
7078                ]
7079            ),
7080            15 => simd_shuffle!(
7081                a,
7082                b,
7083                [
7084                    0,
7085                    1,
7086                    2,
7087                    3,
7088                    4,
7089                    5,
7090                    6,
7091                    7,
7092                    8,
7093                    9,
7094                    10,
7095                    11,
7096                    12,
7097                    13,
7098                    14,
7099                    16 + LANE2 as u32
7100                ]
7101            ),
7102            _ => unreachable_unchecked(),
7103        }
7104    }
7105}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns `a` with lane LANE1 replaced by lane LANE2 of `b`. Expressed as an
// 8-lane shuffle over the concatenation of `a` (indices 0..8) and `b`
// (indices 8..16); one match arm per LANE1 value because `simd_shuffle!`
// needs a compile-time-constant index array.
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // SAFETY: LANE1 is compile-time restricted to 3 bits, so the masked match
    // covers every possible value and the `_` arm is unreachable; all shuffle
    // indices lie in 0..16.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns `a` with lane LANE1 replaced by lane LANE2 of `b`. Expressed as a
// 2-lane shuffle over the concatenation of `a` (indices 0..2) and `b`
// (indices 2..4).
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // SAFETY: LANE1 is compile-time restricted to 1 bit, so the masked match
    // covers both possible values and the `_` arm is unreachable; all shuffle
    // indices lie in 0..4.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
7154#[doc = "Insert vector element from another vector element"]
7155#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
7156#[inline]
7157#[target_feature(enable = "neon")]
7158#[cfg_attr(test, assert_instr(nop))]
7159#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7160pub fn vcreate_f64(a: u64) -> float64x1_t {
7161    unsafe { transmute(a) }
7162}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Narrows each of the two f64 lanes of `a` to f32 (FCVTN).
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // SAFETY: lane-wise numeric cast between vectors of equal lane count.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Widens each of the two f32 lanes of `a` to f64 (FCVTL); exact, since every
// f32 is representable as an f64.
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // SAFETY: lane-wise numeric cast between vectors of equal lane count.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Converts the single signed 64-bit lane of `a` to f64 (SCVTF).
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // SAFETY: lane-wise numeric cast between vectors of equal lane count.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Converts each signed 64-bit lane of `a` to f64 (SCVTF), two lanes.
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // SAFETY: lane-wise numeric cast between vectors of equal lane count.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Converts the single unsigned 64-bit lane of `a` to f64 (UCVTF).
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // SAFETY: lane-wise numeric cast between vectors of equal lane count.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Converts each unsigned 64-bit lane of `a` to f64 (UCVTF), two lanes.
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // SAFETY: lane-wise numeric cast between vectors of equal lane count.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// Narrows the four f32 lanes of `b` to f16 and places them in the high half
// of the result, with `a` occupying the low half (FCVTN2).
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    vcombine_f16(a, vcvt_f16_f32(b))
}
7227#[doc = "Floating-point convert to higher precision"]
7228#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
7229#[inline]
7230#[target_feature(enable = "neon")]
7231#[cfg_attr(test, assert_instr(fcvtl2))]
7232#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
7233#[cfg(not(target_arch = "arm64ec"))]
7234pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
7235    vcvt_f32_f16(vget_high_f16(a))
7236}
7237#[doc = "Floating-point convert to lower precision narrow"]
7238#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
7239#[inline]
7240#[target_feature(enable = "neon")]
7241#[cfg_attr(test, assert_instr(fcvtn2))]
7242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7243pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
7244    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
7245}
7246#[doc = "Floating-point convert to higher precision long"]
7247#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
7248#[inline]
7249#[target_feature(enable = "neon")]
7250#[cfg_attr(test, assert_instr(fcvtl2))]
7251#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7252pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
7253    unsafe {
7254        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
7255        simd_cast(b)
7256    }
7257}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N is the fixed-point fraction-bit count (per the ACLE contract);
    // the instruction accepts 1..=64, enforced at compile time below.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // 128-bit (two-lane) variant; same 1..=64 fraction-bit constraint on N.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // Unsigned counterpart of vcvt_n_f64_s64 (UCVTF; vcvtfxu2fp intrinsic).
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // 128-bit (two-lane) unsigned variant; same 1..=64 constraint on N.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N is the fixed-point fraction-bit count (per the ACLE contract);
    // must be 1..=64 (compile-time checked). Rounds toward zero (FCVTZS).
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // 128-bit (two-lane) variant of vcvt_n_s64_f64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // Unsigned counterpart (FCVTZU; vcvtfp2fxu intrinsic); same constraint on N.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // 128-bit (two-lane) unsigned variant.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    // Uses LLVM's saturating conversion (llvm.fptosi.sat) rather than a plain
    // cast so out-of-range inputs clamp, matching FCVTZS behavior.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    // 128-bit (two-lane) variant of vcvt_s64_f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Unsigned counterpart via llvm.fptoui.sat (FCVTZU).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (two-lane) unsigned variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    // Direct mapping to the FCVTAS vector intrinsic (round-to-nearest, ties away).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    // 128-bit (eight-lane) f16 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    // f32x2 -> i32x2 via FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    // 128-bit (four-lane) f32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    // Single-lane f64 -> i64 via FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    // 128-bit (two-lane) f64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Unsigned counterpart of vcvta_s16_f16 (FCVTAU intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // 128-bit (eight-lane) f16 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    // f32x2 -> u32x2 via FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit (four-lane) f32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane f64 -> u64 via FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (two-lane) f64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Implemented via the i32 scalar intrinsic, then truncated with `as i16`.
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    // Scalar f16 -> i32 via the FCVTAS intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    // Scalar f16 -> i64 via the FCVTAS intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Implemented via the u32 scalar intrinsic, then truncated with `as u16`.
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    // Scalar f16 -> u32 via the FCVTAU intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    // Scalar f16 -> u64 via the FCVTAU intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    // Scalar f32 -> i32 via the FCVTAS intrinsic (`s` suffix = 32-bit scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    // Scalar f64 -> i64 via the FCVTAS intrinsic (`d` suffix = 64-bit scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    // Scalar f32 -> u32 via the FCVTAU intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    // Scalar f64 -> u64 via the FCVTAU intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtad_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // Plain Rust cast; codegen is expected to emit SCVTF (see assert_instr).
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // Plain Rust cast; codegen is expected to emit SCVTF.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // Plain Rust cast i16 -> f16; expected SCVTF codegen.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // Plain Rust cast i32 -> f16; expected SCVTF codegen.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // Plain Rust cast i64 -> f16; expected SCVTF codegen.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // Plain Rust cast u16 -> f16; expected UCVTF codegen.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // Plain Rust cast u32 -> f16; expected UCVTF codegen.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // Plain Rust cast u64 -> f16; expected UCVTF codegen.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the fixed-point fraction-bit count (per the ACLE contract), 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegates to the i32 variant after sign-extending the input.
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    // Fraction-bit count N constrained to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    // i64 variant of the scalar fixed-point -> f16 conversion; N in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s64(a, N) }
}
7942#[doc = "Fixed-point convert to floating-point"]
7943#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
7944#[inline]
7945#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7946#[rustc_legacy_const_generics(1)]
7947#[target_feature(enable = "neon,fp16")]
7948#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7949#[cfg(not(target_arch = "arm64ec"))]
7950pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
7951    static_assert!(N >= 1 && N <= 16);
7952    vcvth_n_f16_u32::<N>(a as u32)
7953}
7954#[doc = "Fixed-point convert to floating-point"]
7955#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
7956#[inline]
7957#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7958#[rustc_legacy_const_generics(1)]
7959#[target_feature(enable = "neon,fp16")]
7960#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7961#[cfg(not(target_arch = "arm64ec"))]
7962pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
7963    static_assert!(N >= 1 && N <= 16);
7964    unsafe extern "unadjusted" {
7965        #[cfg_attr(
7966            any(target_arch = "aarch64", target_arch = "arm64ec"),
7967            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
7968        )]
7969        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
7970    }
7971    unsafe { _vcvth_n_f16_u32(a, N) }
7972}
7973#[doc = "Fixed-point convert to floating-point"]
7974#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
7975#[inline]
7976#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7977#[rustc_legacy_const_generics(1)]
7978#[target_feature(enable = "neon,fp16")]
7979#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7980#[cfg(not(target_arch = "arm64ec"))]
7981pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
7982    static_assert!(N >= 1 && N <= 16);
7983    unsafe extern "unadjusted" {
7984        #[cfg_attr(
7985            any(target_arch = "aarch64", target_arch = "arm64ec"),
7986            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
7987        )]
7988        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
7989    }
7990    unsafe { _vcvth_n_f16_u64(a, N) }
7991}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fractional bits; must be 1..=16 (compile-time check).
    static_assert!(N >= 1 && N <= 16);
    // Reuse the 32-bit conversion, then truncate the result to 16 bits.
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    // N is the number of fractional bits; must be 1..=16 (compile-time check).
    static_assert!(N >= 1 && N <= 16);
    // Binding for the LLVM float-to-signed-fixed-point intrinsic; the second
    // argument is the fractional-bit count.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    // N is the number of fractional bits; must be 1..=16 (compile-time check).
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // N is the number of fractional bits; must be 1..=16 (compile-time check).
    static_assert!(N >= 1 && N <= 16);
    // Reuse the 32-bit conversion, then truncate the result to 16 bits.
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    // N is the number of fractional bits; must be 1..=16 (compile-time check).
    static_assert!(N >= 1 && N <= 16);
    // Unsigned counterpart of the `vcvtfp2fxs` binding above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    // N is the number of fractional bits; must be 1..=16 (compile-time check).
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Rust's float-to-int `as` rounds toward zero and saturates (NaN -> 0),
    // matching the `fcvtzs` instruction asserted above.
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Same truncating/saturating cast as the i16 variant, 32-bit result.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Same truncating/saturating cast as the i16 variant, 64-bit result.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Unsigned counterpart: `as` rounds toward zero and saturates
    // (negatives and NaN -> 0), matching `fcvtzu`.
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Same truncating/saturating cast as the u16 variant, 32-bit result.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Same truncating/saturating cast as the u16 variant, 64-bit result.
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    // Binding for the LLVM round-toward-minus-infinity (FCVTMS) intrinsic,
    // 4-lane f16 -> i16 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    // 8-lane (quad-register) variant of `vcvtm_s16_f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    // 2-lane f32 -> i32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    // 4-lane (quad-register) f32 -> i32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    // 1-lane f64 -> i64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    // 2-lane (quad-register) f64 -> i64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Unsigned (FCVTMU) counterpart of `vcvtm_s16_f16`; 4-lane f16 -> u16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // 8-lane (quad-register) variant of `vcvtm_u16_f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    // 2-lane f32 -> u32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // 4-lane (quad-register) f32 -> u32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    // 1-lane f64 -> u64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // 2-lane (quad-register) f64 -> u64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // Scalar variant: reuse the 32-bit conversion, then truncate to 16 bits.
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    // Scalar FCVTMS binding: f16 -> i32, rounding toward minus infinity.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    // 64-bit result variant of `vcvtmh_s32_f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // Scalar variant: reuse the 32-bit conversion, then truncate to 16 bits.
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    // Scalar FCVTMU binding: f16 -> u32, rounding toward minus infinity.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    // 64-bit result variant of `vcvtmh_u32_f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    // Scalar FCVTMS binding: f32 -> i32, rounding toward minus infinity.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    // Scalar FCVTMS binding: f64 -> i64, rounding toward minus infinity.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    // Scalar FCVTMU binding: f32 -> u32, rounding toward minus infinity.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    // Scalar FCVTMU binding: f64 -> u64, rounding toward minus infinity.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: scalar conversion intrinsic; takes and returns plain values.
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    // Binding for the LLVM round-to-nearest-ties-even (FCVTNS) intrinsic,
    // 4-lane f16 -> i16 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    // 8-lane (quad-register) variant of `vcvtn_s16_f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    // 2-lane f32 -> i32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    // 4-lane (quad-register) f32 -> i32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    // 1-lane f64 -> i64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    // 2-lane (quad-register) f64 -> i64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: vector conversion intrinsic; takes and returns plain SIMD values.
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    // Delegates to the i32 variant and narrows the result with `as i16`.
    // NOTE(review): the narrowing cast truncates if the converted value exceeds
    // the i16 range (possible for large f16 inputs) — matches generator output.
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    // Delegates to the u32 variant and narrows the result with `as u16`.
    // NOTE(review): the narrowing cast truncates if the converted value exceeds
    // the u16 range (possible for large f16 inputs) — matches generator output.
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    // Delegates to the i32 variant and narrows the result with `as i16`.
    // NOTE(review): the narrowing cast truncates if the converted value exceeds
    // the i16 range (possible for large f16 inputs) — matches generator output.
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    // Delegates to the u32 variant and narrows the result with `as u16`.
    // NOTE(review): the narrowing cast truncates if the converted value exceeds
    // the u16 range (possible for large f16 inputs) — matches generator output.
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon,fp16` target features are enabled on this function.
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the declaration matches the intrinsic's signature and the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtpd_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    // A plain `as` cast; the assert_instr above checks it lowers to UCVTF.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    // A plain `as` cast; the assert_instr above checks it lowers to UCVTF.
    a as f64
}
9214#[doc = "Fixed-point convert to floating-point"]
9215#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9216#[inline]
9217#[target_feature(enable = "neon")]
9218#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9219#[rustc_legacy_const_generics(1)]
9220#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9221pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9222    static_assert!(N >= 1 && N <= 64);
9223    unsafe extern "unadjusted" {
9224        #[cfg_attr(
9225            any(target_arch = "aarch64", target_arch = "arm64ec"),
9226            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9227        )]
9228        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9229    }
9230    unsafe { _vcvts_n_f32_s32(a, N) }
9231}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    // N is the fixed-point scaling immediate; 1..=64 for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    // SAFETY: the declaration matches the intrinsic's signature, the `neon`
    // target feature is enabled, and N has been validated above.
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    // N is the fixed-point scaling immediate; 1..=32 for a 32-bit source.
    static_assert!(N >= 1 && N <= 32);
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    // SAFETY: the declaration matches the intrinsic's signature, the `neon`
    // target feature is enabled, and N has been validated above.
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    // N is the fixed-point scaling immediate; 1..=64 for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    // SAFETY: the declaration matches the intrinsic's signature, the `neon`
    // target feature is enabled, and N has been validated above.
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    // N is the number of fractional bits in the signed fixed-point result.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    // N is the number of fractional bits in the signed fixed-point result.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    // N is the number of fractional bits in the unsigned fixed-point result.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    // N is the number of fractional bits in the unsigned fixed-point result.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_s32_f32(a: f32) -> i32 {
    // Rust's saturating float-to-int `as` cast lowers to FCVTZS on AArch64.
    a as i32
}
#[doc = "Floating-point convert to signed integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_s64_f64(a: f64) -> i64 {
    // Rust's saturating float-to-int `as` cast lowers to FCVTZS on AArch64.
    a as i64
}
#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_u32_f32(a: f32) -> u32 {
    // Rust's saturating float-to-int `as` cast lowers to FCVTZU on AArch64.
    a as u32
}
#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_u64_f64(a: f64) -> u64 {
    // Rust's saturating float-to-int `as` cast lowers to FCVTZU on AArch64.
    a as u64
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Keep `a` in lanes 0-1 and place the narrowed `b` in lanes 2-3.
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    // Implemented by broadcasting the scalar, converting the vector, then
    // extracting lane 0 of the narrowed result.
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Lane-wise division via the portable `simd_div` intrinsic.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Lane-wise division via the portable `simd_div` intrinsic.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Lane-wise division via the portable `simd_div` intrinsic.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Lane-wise division via the portable `simd_div` intrinsic.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise division via the portable `simd_div` intrinsic.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise division via the portable `simd_div` intrinsic.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    // Plain scalar half-precision division.
    a / b
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    // View `c` as four 32-bit groups of 4 bytes, broadcast group LANE to
    // every position, then reinterpret back to bytes for the dot product.
    let c: int32x4_t = vreinterpretq_s32_s8(c);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vdot_s32(a, b, vreinterpret_s8_s32(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    // View `c` as four 32-bit groups of 4 bytes, broadcast group LANE to
    // every position, then reinterpret back to bytes for the dot product.
    let c: int32x4_t = vreinterpretq_s32_s8(c);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vdotq_s32(a, b, vreinterpretq_s8_s32(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    // View `c` as four 32-bit groups of 4 bytes, broadcast group LANE to
    // every position, then reinterpret back to bytes for the dot product.
    let c: uint32x4_t = vreinterpretq_u32_u8(c);
    unsafe {
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vdot_u32(a, b, vreinterpret_u8_u32(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    // View `c` as four 32-bit groups of 4 bytes, broadcast group LANE to
    // every position, then reinterpret back to bytes for the dot product.
    let c: uint32x4_t = vreinterpretq_u32_u8(c);
    unsafe {
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vdotq_u32(a, b, vreinterpretq_u8_u32(c))
    }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    // A one-lane vector has only lane 0, so duplication is the identity.
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    // A one-lane vector has only lane 0, so duplication is the identity.
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(N, 1);
    // Extract lane N of the quad vector and wrap it in a one-lane vector.
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    static_assert_uimm_bits!(N, 1);
    // Extract lane N of the quad vector and wrap it in a one-lane vector.
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    // 8 lanes: the compile-time lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    // 8 lanes: the compile-time lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    // 8 lanes: the compile-time lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    // 8 lanes: the compile-time lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    // 8 lanes: the compile-time lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    // 8 lanes: the compile-time lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    // 16 lanes: the compile-time lane index needs 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    // 16 lanes: the compile-time lane index needs 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    // 16 lanes: the compile-time lane index needs 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    // One-lane vector: only lane 0 exists.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    // One-lane vector: only lane 0 exists.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    // One-lane vector: only lane 0 exists.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    // 4 lanes: the compile-time lane index needs 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
9744#[doc = "Extract an element from a vector"]
9745#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9746#[inline]
9747#[cfg_attr(test, assert_instr(nop, N = 4))]
9748#[rustc_legacy_const_generics(1)]
9749#[target_feature(enable = "neon,fp16")]
9750#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9751#[cfg(not(target_arch = "arm64ec"))]
9752pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
9753    static_assert_uimm_bits!(N, 4);
9754    unsafe { simd_extract!(a, N as u32) }
9755}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    // Broadcast the single source lane into both lanes of the result.
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    // Broadcast the single source lane into both lanes of the result.
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    // Broadcast lane N into both lanes of the result.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    // Broadcast lane N into both lanes of the result.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    // 2 lanes: the compile-time lane index needs 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    // 2 lanes: the compile-time lane index needs 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    // 2 lanes: the compile-time lane index needs 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    // 2 lanes: the compile-time lane index needs 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    // 2 lanes: the compile-time lane index needs 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // 2 lanes: the compile-time lane index needs 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    // 4 lanes: the compile-time lane index needs 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    // 4 lanes: the compile-time lane index needs 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    // 4 lanes: the compile-time lane index needs 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    // 4 lanes: the compile-time lane index needs 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    // 4 lanes: the compile-time lane index needs 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    // 4 lanes: the compile-time lane index needs 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
9932#[doc = "Three-way exclusive OR"]
9933#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
9934#[inline]
9935#[target_feature(enable = "neon,sha3")]
9936#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9937#[cfg_attr(test, assert_instr(eor3))]
9938pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
9939    unsafe extern "unadjusted" {
9940        #[cfg_attr(
9941            any(target_arch = "aarch64", target_arch = "arm64ec"),
9942            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
9943        )]
9944        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
9945    }
9946    unsafe { _veor3q_s8(a, b, c) }
9947}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Computed in a single SHA3 EOR3 instruction via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Computed in a single SHA3 EOR3 instruction via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Computed in a single SHA3 EOR3 instruction via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Unsigned variant (`eor3u`) of the single-instruction SHA3 EOR3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Unsigned variant (`eor3u`) of the single-instruction SHA3 EOR3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Unsigned variant (`eor3u`) of the single-instruction SHA3 EOR3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Unsigned variant (`eor3u`) of the single-instruction SHA3 EOR3.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // N is the starting lane of the extracted window; only 1 bit is valid
    // for a 2-lane vector (N == 0 or 1).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Shuffle indices run over the concatenation a:b (a = 0..1, b = 2..3),
        // so N == 1 yields [a[1], b[0]].
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            // N is masked to one bit above, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // N is the starting lane of the extracted window; only 1 bit is valid
    // for a 2-lane vector (N == 0 or 1).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Shuffle indices run over the concatenation a:b (a = 0..1, b = 2..3),
        // so N == 1 yields [a[1], b[0]].
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            // N is masked to one bit above, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Note the argument order: simd_fma(b, c, a) computes b * c + a,
    // i.e. `a` is the accumulator.
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE indexes a 4-lane vector, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all lanes, then fma into accumulator `a`.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `laneq`: the lane source `c` is a full 8-lane vector, so LANE needs 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` to all lanes, then fma into accumulator `a`.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE indexes the 4-lane source `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 8 lanes, then fma into `a`.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE indexes the 8-lane source `c`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` across all 8 lanes, then fma into `a`.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // LANE indexes the 2-lane source `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c`, then fma into accumulator `a`.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `laneq`: the lane source `c` is a 4-lane vector, so LANE needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c`, then fma into accumulator `a`.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // LANE indexes the 2-lane source `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across all 4 lanes, then fma into `a`.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE indexes the 4-lane source `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 4 lanes, then fma into `a`.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // LANE indexes the 2-lane source `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across both lanes, then fma into `a`.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast the (only) lane of `c`, then fma into accumulator `a`.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // `laneq`: the lane source `c` is a 2-lane vector, so LANE needs 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c`, then fma into accumulator `a`.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
10272#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10273#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
10274#[inline]
10275#[target_feature(enable = "neon,fp16")]
10276#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10277#[cfg(not(target_arch = "arm64ec"))]
10278#[cfg_attr(test, assert_instr(fmla))]
10279pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
10280    vfma_f16(a, b, vdup_n_f16(c))
10281}
10282#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10283#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
10284#[inline]
10285#[target_feature(enable = "neon,fp16")]
10286#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10287#[cfg(not(target_arch = "arm64ec"))]
10288#[cfg_attr(test, assert_instr(fmla))]
10289pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
10290    vfmaq_f16(a, b, vdupq_n_f16(c))
10291}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar `c`, then fma into accumulator `a`.
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        // fmaf64(b, c, a) computes b * c + a; `a` is the accumulator.
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    // fmaf16(b, c, a) computes b * c + a; `a` is the accumulator.
    fmaf16(b, c, a)
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // LANE indexes the 4-lane vector `v`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Pull the chosen lane out as a scalar, then do the scalar fma.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // LANE indexes the 8-lane vector `v`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Pull the chosen lane out as a scalar, then do the scalar fma.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Note the argument order: simd_fma(b, c, a) computes b * c + a,
    // i.e. `a` is the accumulator.
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `c` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast the (only) lane of `c` to both lanes, then fma into `a`.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` to both lanes, then fma into accumulator `a`.
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // LANE indexes the 2-lane vector `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        // fmaf32(b, c, a) computes b * c + a; `a` is the accumulator.
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // LANE indexes the 4-lane vector `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        // fmaf32(b, c, a) computes b * c + a; `a` is the accumulator.
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // LANE indexes the 2-lane vector `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        // fmaf64(b, c, a) computes b * c + a; `a` is the accumulator.
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Widening f16 -> f32 multiply-add into accumulator `r`; the `2`
    // (FMLAL2) form operates on the upper halves of the f16 sources.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Widening f16 -> f32 multiply-add into accumulator `r`; the `2`
    // (FMLAL2) form operates on the upper halves of the f16 sources.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // LANE indexes the 4-lane vector `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b`, then do the widening high-half fma.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // LANE indexes the 8-lane vector `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b`, then do the widening high-half fma.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // LANE indexes the 4-lane vector `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b`, then do the widening high-half fma.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // LANE indexes the 8-lane vector `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b`, then do the widening high-half fma.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // LANE indexes the 4-lane vector `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b`, then do the widening low-half fma (FMLAL).
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // LANE indexes the 8-lane vector `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b`, then do the widening low-half fma (FMLAL).
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // LANE indexes the 4-lane vector `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b`, then do the widening low-half fma (FMLAL).
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // LANE indexes the 8-lane vector `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b`, then do the widening low-half fma (FMLAL).
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Widening f16 -> f32 multiply-add into accumulator `r`; FMLAL (no `2`)
    // operates on the lower halves of the f16 sources.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Widening f16 -> f32 multiply-add into accumulator `r`; FMLAL (no `2`)
    // operates on the lower halves of the f16 sources.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
10638#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10639#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
10640#[inline]
10641#[target_feature(enable = "neon,fp16")]
10642#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10643#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10644#[cfg(not(target_arch = "arm64ec"))]
10645#[cfg_attr(test, assert_instr(fmlsl2))]
10646pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10647    unsafe extern "unadjusted" {
10648        #[cfg_attr(
10649            any(target_arch = "aarch64", target_arch = "arm64ec"),
10650            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
10651        )]
10652        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10653    }
10654    unsafe { _vfmlsl_high_f16(r, a, b) }
10655}
10656#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10657#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
10658#[inline]
10659#[target_feature(enable = "neon,fp16")]
10660#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10661#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10662#[cfg(not(target_arch = "arm64ec"))]
10663#[cfg_attr(test, assert_instr(fmlsl2))]
10664pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10665    unsafe extern "unadjusted" {
10666        #[cfg_attr(
10667            any(target_arch = "aarch64", target_arch = "arm64ec"),
10668            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
10669        )]
10670        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10671    }
10672    unsafe { _vfmlslq_high_f16(r, a, b) }
10673}
10674#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10675#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
10676#[inline]
10677#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10678#[target_feature(enable = "neon,fp16")]
10679#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10680#[rustc_legacy_const_generics(3)]
10681#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10682#[cfg(not(target_arch = "arm64ec"))]
10683pub fn vfmlsl_lane_high_f16<const LANE: i32>(
10684    r: float32x2_t,
10685    a: float16x4_t,
10686    b: float16x4_t,
10687) -> float32x2_t {
10688    static_assert_uimm_bits!(LANE, 2);
10689    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10690}
10691#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10692#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
10693#[inline]
10694#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10695#[target_feature(enable = "neon,fp16")]
10696#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10697#[rustc_legacy_const_generics(3)]
10698#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10699#[cfg(not(target_arch = "arm64ec"))]
10700pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
10701    r: float32x2_t,
10702    a: float16x4_t,
10703    b: float16x8_t,
10704) -> float32x2_t {
10705    static_assert_uimm_bits!(LANE, 3);
10706    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10707}
10708#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
10710#[inline]
10711#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10712#[target_feature(enable = "neon,fp16")]
10713#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10714#[rustc_legacy_const_generics(3)]
10715#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10716#[cfg(not(target_arch = "arm64ec"))]
10717pub fn vfmlslq_lane_high_f16<const LANE: i32>(
10718    r: float32x4_t,
10719    a: float16x8_t,
10720    b: float16x4_t,
10721) -> float32x4_t {
10722    static_assert_uimm_bits!(LANE, 2);
10723    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10724}
10725#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10726#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
10727#[inline]
10728#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10729#[target_feature(enable = "neon,fp16")]
10730#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10731#[rustc_legacy_const_generics(3)]
10732#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10733#[cfg(not(target_arch = "arm64ec"))]
10734pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
10735    r: float32x4_t,
10736    a: float16x8_t,
10737    b: float16x8_t,
10738) -> float32x4_t {
10739    static_assert_uimm_bits!(LANE, 3);
10740    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10741}
10742#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
10744#[inline]
10745#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10746#[target_feature(enable = "neon,fp16")]
10747#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10748#[rustc_legacy_const_generics(3)]
10749#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10750#[cfg(not(target_arch = "arm64ec"))]
10751pub fn vfmlsl_lane_low_f16<const LANE: i32>(
10752    r: float32x2_t,
10753    a: float16x4_t,
10754    b: float16x4_t,
10755) -> float32x2_t {
10756    static_assert_uimm_bits!(LANE, 2);
10757    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10758}
10759#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10760#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
10761#[inline]
10762#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10763#[target_feature(enable = "neon,fp16")]
10764#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10765#[rustc_legacy_const_generics(3)]
10766#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10767#[cfg(not(target_arch = "arm64ec"))]
10768pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
10769    r: float32x2_t,
10770    a: float16x4_t,
10771    b: float16x8_t,
10772) -> float32x2_t {
10773    static_assert_uimm_bits!(LANE, 3);
10774    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10775}
10776#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10777#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
10778#[inline]
10779#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10780#[target_feature(enable = "neon,fp16")]
10781#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10782#[rustc_legacy_const_generics(3)]
10783#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10784#[cfg(not(target_arch = "arm64ec"))]
10785pub fn vfmlslq_lane_low_f16<const LANE: i32>(
10786    r: float32x4_t,
10787    a: float16x8_t,
10788    b: float16x4_t,
10789) -> float32x4_t {
10790    static_assert_uimm_bits!(LANE, 2);
10791    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10792}
10793#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
10795#[inline]
10796#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10797#[target_feature(enable = "neon,fp16")]
10798#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10799#[rustc_legacy_const_generics(3)]
10800#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10801#[cfg(not(target_arch = "arm64ec"))]
10802pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
10803    r: float32x4_t,
10804    a: float16x8_t,
10805    b: float16x8_t,
10806) -> float32x4_t {
10807    static_assert_uimm_bits!(LANE, 3);
10808    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10809}
10810#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10811#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
10812#[inline]
10813#[target_feature(enable = "neon,fp16")]
10814#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10815#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10816#[cfg(not(target_arch = "arm64ec"))]
10817#[cfg_attr(test, assert_instr(fmlsl))]
10818pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10819    unsafe extern "unadjusted" {
10820        #[cfg_attr(
10821            any(target_arch = "aarch64", target_arch = "arm64ec"),
10822            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
10823        )]
10824        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10825    }
10826    unsafe { _vfmlsl_low_f16(r, a, b) }
10827}
10828#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10829#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
10830#[inline]
10831#[target_feature(enable = "neon,fp16")]
10832#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10833#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10834#[cfg(not(target_arch = "arm64ec"))]
10835#[cfg_attr(test, assert_instr(fmlsl))]
10836pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10837    unsafe extern "unadjusted" {
10838        #[cfg_attr(
10839            any(target_arch = "aarch64", target_arch = "arm64ec"),
10840            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
10841        )]
10842        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10843    }
10844    unsafe { _vfmlslq_low_f16(r, a, b) }
10845}
10846#[doc = "Floating-point fused multiply-subtract from accumulator"]
10847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
10848#[inline]
10849#[target_feature(enable = "neon")]
10850#[cfg_attr(test, assert_instr(fmsub))]
10851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10852pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
10853    unsafe {
10854        let b: float64x1_t = simd_neg(b);
10855        vfma_f64(a, b, c)
10856    }
10857}
10858#[doc = "Floating-point fused multiply-subtract from accumulator"]
10859#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
10860#[inline]
10861#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10862#[rustc_legacy_const_generics(3)]
10863#[target_feature(enable = "neon,fp16")]
10864#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10865#[cfg(not(target_arch = "arm64ec"))]
10866pub fn vfms_lane_f16<const LANE: i32>(
10867    a: float16x4_t,
10868    b: float16x4_t,
10869    c: float16x4_t,
10870) -> float16x4_t {
10871    static_assert_uimm_bits!(LANE, 2);
10872    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10873}
10874#[doc = "Floating-point fused multiply-subtract from accumulator"]
10875#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
10876#[inline]
10877#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10878#[rustc_legacy_const_generics(3)]
10879#[target_feature(enable = "neon,fp16")]
10880#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10881#[cfg(not(target_arch = "arm64ec"))]
10882pub fn vfms_laneq_f16<const LANE: i32>(
10883    a: float16x4_t,
10884    b: float16x4_t,
10885    c: float16x8_t,
10886) -> float16x4_t {
10887    static_assert_uimm_bits!(LANE, 3);
10888    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10889}
10890#[doc = "Floating-point fused multiply-subtract from accumulator"]
10891#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
10892#[inline]
10893#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10894#[rustc_legacy_const_generics(3)]
10895#[target_feature(enable = "neon,fp16")]
10896#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10897#[cfg(not(target_arch = "arm64ec"))]
10898pub fn vfmsq_lane_f16<const LANE: i32>(
10899    a: float16x8_t,
10900    b: float16x8_t,
10901    c: float16x4_t,
10902) -> float16x8_t {
10903    static_assert_uimm_bits!(LANE, 2);
10904    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10905}
10906#[doc = "Floating-point fused multiply-subtract from accumulator"]
10907#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
10908#[inline]
10909#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10910#[rustc_legacy_const_generics(3)]
10911#[target_feature(enable = "neon,fp16")]
10912#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
10913#[cfg(not(target_arch = "arm64ec"))]
10914pub fn vfmsq_laneq_f16<const LANE: i32>(
10915    a: float16x8_t,
10916    b: float16x8_t,
10917    c: float16x8_t,
10918) -> float16x8_t {
10919    static_assert_uimm_bits!(LANE, 3);
10920    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10921}
10922#[doc = "Floating-point fused multiply-subtract to accumulator"]
10923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
10924#[inline]
10925#[target_feature(enable = "neon")]
10926#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10927#[rustc_legacy_const_generics(3)]
10928#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10929pub fn vfms_lane_f32<const LANE: i32>(
10930    a: float32x2_t,
10931    b: float32x2_t,
10932    c: float32x2_t,
10933) -> float32x2_t {
10934    static_assert_uimm_bits!(LANE, 1);
10935    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10936}
10937#[doc = "Floating-point fused multiply-subtract to accumulator"]
10938#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
10939#[inline]
10940#[target_feature(enable = "neon")]
10941#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10942#[rustc_legacy_const_generics(3)]
10943#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10944pub fn vfms_laneq_f32<const LANE: i32>(
10945    a: float32x2_t,
10946    b: float32x2_t,
10947    c: float32x4_t,
10948) -> float32x2_t {
10949    static_assert_uimm_bits!(LANE, 2);
10950    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10951}
10952#[doc = "Floating-point fused multiply-subtract to accumulator"]
10953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
10954#[inline]
10955#[target_feature(enable = "neon")]
10956#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10957#[rustc_legacy_const_generics(3)]
10958#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10959pub fn vfmsq_lane_f32<const LANE: i32>(
10960    a: float32x4_t,
10961    b: float32x4_t,
10962    c: float32x2_t,
10963) -> float32x4_t {
10964    static_assert_uimm_bits!(LANE, 1);
10965    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10966}
10967#[doc = "Floating-point fused multiply-subtract to accumulator"]
10968#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
10969#[inline]
10970#[target_feature(enable = "neon")]
10971#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10972#[rustc_legacy_const_generics(3)]
10973#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10974pub fn vfmsq_laneq_f32<const LANE: i32>(
10975    a: float32x4_t,
10976    b: float32x4_t,
10977    c: float32x4_t,
10978) -> float32x4_t {
10979    static_assert_uimm_bits!(LANE, 2);
10980    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10981}
10982#[doc = "Floating-point fused multiply-subtract to accumulator"]
10983#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
10984#[inline]
10985#[target_feature(enable = "neon")]
10986#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10987#[rustc_legacy_const_generics(3)]
10988#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10989pub fn vfmsq_laneq_f64<const LANE: i32>(
10990    a: float64x2_t,
10991    b: float64x2_t,
10992    c: float64x2_t,
10993) -> float64x2_t {
10994    static_assert_uimm_bits!(LANE, 1);
10995    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
10996}
10997#[doc = "Floating-point fused multiply-subtract to accumulator"]
10998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
10999#[inline]
11000#[target_feature(enable = "neon")]
11001#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11002#[rustc_legacy_const_generics(3)]
11003#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11004pub fn vfms_lane_f64<const LANE: i32>(
11005    a: float64x1_t,
11006    b: float64x1_t,
11007    c: float64x1_t,
11008) -> float64x1_t {
11009    static_assert!(LANE == 0);
11010    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
11011}
11012#[doc = "Floating-point fused multiply-subtract to accumulator"]
11013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
11014#[inline]
11015#[target_feature(enable = "neon")]
11016#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11017#[rustc_legacy_const_generics(3)]
11018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11019pub fn vfms_laneq_f64<const LANE: i32>(
11020    a: float64x1_t,
11021    b: float64x1_t,
11022    c: float64x2_t,
11023) -> float64x1_t {
11024    static_assert_uimm_bits!(LANE, 1);
11025    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
11026}
11027#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
11028#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
11029#[inline]
11030#[target_feature(enable = "neon,fp16")]
11031#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11032#[cfg(not(target_arch = "arm64ec"))]
11033#[cfg_attr(test, assert_instr(fmls))]
11034pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
11035    vfms_f16(a, b, vdup_n_f16(c))
11036}
11037#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
11038#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
11039#[inline]
11040#[target_feature(enable = "neon,fp16")]
11041#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11042#[cfg(not(target_arch = "arm64ec"))]
11043#[cfg_attr(test, assert_instr(fmls))]
11044pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
11045    vfmsq_f16(a, b, vdupq_n_f16(c))
11046}
11047#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
11048#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
11049#[inline]
11050#[target_feature(enable = "neon")]
11051#[cfg_attr(test, assert_instr(fmsub))]
11052#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11053pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
11054    vfms_f64(a, b, vdup_n_f64(c))
11055}
11056#[doc = "Floating-point fused multiply-subtract from accumulator"]
11057#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
11058#[inline]
11059#[cfg_attr(test, assert_instr(fmsub))]
11060#[target_feature(enable = "neon,fp16")]
11061#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11062#[cfg(not(target_arch = "arm64ec"))]
11063pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
11064    vfmah_f16(a, -b, c)
11065}
11066#[doc = "Floating-point fused multiply-subtract from accumulator"]
11067#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
11068#[inline]
11069#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11070#[rustc_legacy_const_generics(3)]
11071#[target_feature(enable = "neon,fp16")]
11072#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11073#[cfg(not(target_arch = "arm64ec"))]
11074pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
11075    static_assert_uimm_bits!(LANE, 2);
11076    unsafe {
11077        let c: f16 = simd_extract!(v, LANE as u32);
11078        vfmsh_f16(a, b, c)
11079    }
11080}
11081#[doc = "Floating-point fused multiply-subtract from accumulator"]
11082#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
11083#[inline]
11084#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11085#[rustc_legacy_const_generics(3)]
11086#[target_feature(enable = "neon,fp16")]
11087#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11088#[cfg(not(target_arch = "arm64ec"))]
11089pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
11090    static_assert_uimm_bits!(LANE, 3);
11091    unsafe {
11092        let c: f16 = simd_extract!(v, LANE as u32);
11093        vfmsh_f16(a, b, c)
11094    }
11095}
11096#[doc = "Floating-point fused multiply-subtract from accumulator"]
11097#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
11098#[inline]
11099#[target_feature(enable = "neon")]
11100#[cfg_attr(test, assert_instr(fmls))]
11101#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11102pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
11103    unsafe {
11104        let b: float64x2_t = simd_neg(b);
11105        vfmaq_f64(a, b, c)
11106    }
11107}
11108#[doc = "Floating-point fused multiply-subtract to accumulator"]
11109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
11110#[inline]
11111#[target_feature(enable = "neon")]
11112#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
11113#[rustc_legacy_const_generics(3)]
11114#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11115pub fn vfmsq_lane_f64<const LANE: i32>(
11116    a: float64x2_t,
11117    b: float64x2_t,
11118    c: float64x1_t,
11119) -> float64x2_t {
11120    static_assert!(LANE == 0);
11121    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
11122}
11123#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
11124#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
11125#[inline]
11126#[target_feature(enable = "neon")]
11127#[cfg_attr(test, assert_instr(fmls))]
11128#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11129pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
11130    vfmsq_f64(a, b, vdupq_n_f64(c))
11131}
11132#[doc = "Floating-point fused multiply-subtract to accumulator"]
11133#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
11134#[inline]
11135#[target_feature(enable = "neon")]
11136#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11137#[rustc_legacy_const_generics(3)]
11138#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11139pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
11140    vfmas_lane_f32::<LANE>(a, -b, c)
11141}
11142#[doc = "Floating-point fused multiply-subtract to accumulator"]
11143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
11144#[inline]
11145#[target_feature(enable = "neon")]
11146#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11147#[rustc_legacy_const_generics(3)]
11148#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11149pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
11150    vfmas_laneq_f32::<LANE>(a, -b, c)
11151}
11152#[doc = "Floating-point fused multiply-subtract to accumulator"]
11153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
11154#[inline]
11155#[target_feature(enable = "neon")]
11156#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11157#[rustc_legacy_const_generics(3)]
11158#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11159pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
11160    vfmad_lane_f64::<LANE>(a, -b, c)
11161}
11162#[doc = "Floating-point fused multiply-subtract to accumulator"]
11163#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
11164#[inline]
11165#[target_feature(enable = "neon")]
11166#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11167#[rustc_legacy_const_generics(3)]
11168#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11169pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
11170    vfmad_laneq_f64::<LANE>(a, -b, c)
11171}
11172#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11173#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
11174#[doc = "## Safety"]
11175#[doc = "  * Neon instrinsic unsafe"]
11176#[inline]
11177#[target_feature(enable = "neon,fp16")]
11178#[cfg_attr(test, assert_instr(ldr))]
11179#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11180#[cfg(not(target_arch = "arm64ec"))]
11181pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
11182    crate::ptr::read_unaligned(ptr.cast())
11183}
11184#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11185#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
11186#[doc = "## Safety"]
11187#[doc = "  * Neon instrinsic unsafe"]
11188#[inline]
11189#[target_feature(enable = "neon,fp16")]
11190#[cfg_attr(test, assert_instr(ldr))]
11191#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11192#[cfg(not(target_arch = "arm64ec"))]
11193pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
11194    crate::ptr::read_unaligned(ptr.cast())
11195}
11196#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
11198#[doc = "## Safety"]
11199#[doc = "  * Neon instrinsic unsafe"]
11200#[inline]
11201#[target_feature(enable = "neon")]
11202#[cfg_attr(test, assert_instr(ldr))]
11203#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11204pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
11205    crate::ptr::read_unaligned(ptr.cast())
11206}
11207#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11208#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
11209#[doc = "## Safety"]
11210#[doc = "  * Neon instrinsic unsafe"]
11211#[inline]
11212#[target_feature(enable = "neon")]
11213#[cfg_attr(test, assert_instr(ldr))]
11214#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11215pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
11216    crate::ptr::read_unaligned(ptr.cast())
11217}
11218#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11219#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
11220#[doc = "## Safety"]
11221#[doc = "  * Neon instrinsic unsafe"]
11222#[inline]
11223#[target_feature(enable = "neon")]
11224#[cfg_attr(test, assert_instr(ldr))]
11225#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11226pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
11227    crate::ptr::read_unaligned(ptr.cast())
11228}
11229#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11230#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
11231#[doc = "## Safety"]
11232#[doc = "  * Neon instrinsic unsafe"]
11233#[inline]
11234#[target_feature(enable = "neon")]
11235#[cfg_attr(test, assert_instr(ldr))]
11236#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11237pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
11238    crate::ptr::read_unaligned(ptr.cast())
11239}
11240#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11241#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
11242#[doc = "## Safety"]
11243#[doc = "  * Neon instrinsic unsafe"]
11244#[inline]
11245#[target_feature(enable = "neon")]
11246#[cfg_attr(test, assert_instr(ldr))]
11247#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11248pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
11249    crate::ptr::read_unaligned(ptr.cast())
11250}
11251#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11252#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
11253#[doc = "## Safety"]
11254#[doc = "  * Neon instrinsic unsafe"]
11255#[inline]
11256#[target_feature(enable = "neon")]
11257#[cfg_attr(test, assert_instr(ldr))]
11258#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11259pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
11260    crate::ptr::read_unaligned(ptr.cast())
11261}
11262#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11263#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
11264#[doc = "## Safety"]
11265#[doc = "  * Neon instrinsic unsafe"]
11266#[inline]
11267#[target_feature(enable = "neon")]
11268#[cfg_attr(test, assert_instr(ldr))]
11269#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11270pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
11271    crate::ptr::read_unaligned(ptr.cast())
11272}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
// `aes` is required because the poly64 types belong to that extension.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    // Unaligned read of one whole vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
// `aes` is required because the poly64 types belong to that extension.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    // Unaligned read of one whole 128-bit vector; compiles to a single LDR.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    // Declaration of the LLVM builtin this intrinsic lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0"
        )]
        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
    }
    // Direct call into LLVM's ld1x2 builtin for this element type.
    _vld1_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    // Declaration of the LLVM builtin this intrinsic lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0"
        )]
        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
    }
    // Direct call into LLVM's ld1x3 builtin for this element type.
    _vld1_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    // Declaration of the LLVM builtin this intrinsic lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0"
        )]
        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
    }
    // Direct call into LLVM's ld1x4 builtin for this element type.
    _vld1_f64_x4(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    // Declaration of the LLVM builtin this intrinsic lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0"
        )]
        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
    }
    // Direct call into LLVM's ld1x2 builtin for this element type.
    _vld1q_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    // Declaration of the LLVM builtin this intrinsic lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0"
        )]
        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
    }
    // Direct call into LLVM's ld1x3 builtin for this element type.
    _vld1q_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    // Declaration of the LLVM builtin this intrinsic lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0"
        )]
        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
    }
    // Direct call into LLVM's ld1x4 builtin for this element type.
    _vld1q_f64_x4(a)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    // Declaration of the LLVM ld2r (load-and-replicate) builtin.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    // `as _` lets the compiler coerce the pointer to the builtin's parameter type.
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    // Declaration of the LLVM ld2r (load-and-replicate) builtin.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    // `as _` lets the compiler coerce the pointer to the builtin's parameter type.
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    // Declaration of the LLVM ld2r (load-and-replicate) builtin. Also the
    // shared implementation behind the u64/p64 variants below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    // `as _` lets the compiler coerce the pointer to the builtin's parameter type.
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    // Declaration of the LLVM ld2 builtin; note it takes a *vector* pointer.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v1f64.p0"
        )]
        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    // Cast the element pointer to the vector-pointer type the builtin expects.
    _vld2_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // A 1-element vector has exactly one lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    // The builtin takes the existing register contents, the lane index, and an
    // untyped (i8) pointer to the memory being loaded.
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // A 1-element vector has exactly one lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    // The builtin takes the existing register contents, the lane index, and an
    // untyped (i8) pointer to the memory being loaded.
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    // poly64 has the same bit layout as i64: reuse the signed implementation.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    // u64 has the same bit layout as i64: reuse the signed implementation.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian: lane order already matches; a plain transmute of the
    // signed variant suffices.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian: reverse the two lanes of each result vector after the raw load.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian: lane order already matches; a plain transmute of the
    // signed variant suffices.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian: reverse the two lanes of each result vector after the raw load.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // Declaration of the LLVM ld2 builtin; note it takes a *vector* pointer.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    // Cast the element pointer to the vector-pointer type the builtin expects.
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // Declaration of the LLVM ld2 builtin; note it takes a *vector* pointer.
    // Also the shared implementation behind the u64/p64 variants below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    // Cast the element pointer to the vector-pointer type the builtin expects.
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // 2-element vector: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    // The builtin takes the existing register contents, the lane index, and an
    // untyped (i8) pointer to the memory being loaded.
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // 16-element vector: LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    // The builtin takes the existing register contents, the lane index, and a
    // pointer to the memory being loaded.
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // 2-element vector: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    // The builtin takes the existing register contents, the lane index, and an
    // untyped (i8) pointer to the memory being loaded.
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 has the same bit layout as i64: reuse the signed implementation.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    // u8 has the same bit layout as i8: reuse the signed implementation.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    // u64 has the same bit layout as i64: reuse the signed implementation.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    // poly8 has the same bit layout as i8: reuse the signed implementation.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian: lane order already matches; a plain transmute of the
    // signed variant suffices.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    // Big-endian: reverse the two 64-bit lanes of each register to restore
    // the lane order expected by the caller.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian: unsigned and signed 64-bit loads share a bit pattern.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
    // Big-endian: reverse the two 64-bit lanes of each register to restore
    // the lane order expected by the caller.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    // Direct binding to the LLVM replicating structured-load intrinsic (LD3R).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    // Direct binding to the LLVM replicating structured-load intrinsic (LD3R).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    // Direct binding to the LLVM replicating structured-load intrinsic (LD3R).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // Direct binding to the LLVM structured-load intrinsic (LD3).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v1f64.p0"
        )]
        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    _vld3_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Polynomial load is bit-identical to the signed one; delegate via transmute.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Unsigned load is bit-identical to the signed one; delegate via transmute.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Little-endian: polynomial and signed 64-bit loads share a bit pattern.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // Big-endian: reverse the two 64-bit lanes of each register to restore
    // the lane order expected by the caller.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Little-endian: unsigned and signed 64-bit loads share a bit pattern.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // Big-endian: reverse the two 64-bit lanes of each register to restore
    // the lane order expected by the caller.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    // Direct binding to the LLVM structured-load intrinsic (LD3).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    // Direct binding to the LLVM structured-load intrinsic (LD3).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Polynomial load is bit-identical to the signed one; delegate via transmute.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
12266#[doc = "Load multiple 3-element structures to two registers"]
12267#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12268#[doc = "## Safety"]
12269#[doc = "  * Neon instrinsic unsafe"]
12270#[inline]
12271#[target_feature(enable = "neon")]
12272#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12273#[rustc_legacy_const_generics(2)]
12274#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12275pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12276    static_assert_uimm_bits!(LANE, 3);
12277    unsafe extern "unadjusted" {
12278        #[cfg_attr(
12279            any(target_arch = "aarch64", target_arch = "arm64ec"),
12280            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12281        )]
12282        fn _vld3q_lane_s8(
12283            a: int8x16_t,
12284            b: int8x16_t,
12285            c: int8x16_t,
12286            n: i64,
12287            ptr: *const i8,
12288        ) -> int8x16x3_t;
12289    }
12290    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12291}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Unsigned load is bit-identical to the signed one; delegate via transmute.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Unsigned load is bit-identical to the signed one; delegate via transmute.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Polynomial load is bit-identical to the signed one; delegate via transmute.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Little-endian: polynomial and signed 64-bit loads share a bit pattern.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    // Big-endian: reverse the two 64-bit lanes of each register to restore
    // the lane order expected by the caller.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // Little-endian: unsigned and signed 64-bit loads share a bit pattern.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
    // Big-endian: reverse the two 64-bit lanes of each register to restore
    // the lane order expected by the caller.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    // Direct binding to the LLVM replicating structured-load intrinsic (LD4R).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    // Direct binding to the LLVM replicating structured-load intrinsic (LD4R).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    // Direct binding to the LLVM replicating structured-load intrinsic (LD4R).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // Direct binding to the LLVM structured-load intrinsic (LD4).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v1f64.p0"
        )]
        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    _vld4_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Polynomial load is bit-identical to the signed one; delegate via transmute.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Unsigned load is bit-identical to the signed one; delegate via transmute.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian: polynomial and signed 64-bit loads share a bit pattern.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    // Big-endian: reverse the two 64-bit lanes of each register to restore
    // the lane order expected by the caller.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian: unsigned and signed 64-bit loads share a bit pattern.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian variant: after the transmuting load, swap the two 64-bit
    // lanes of each register to restore lane order.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Direct binding to the LLVM ld4 intrinsic for 2 x f64 per register.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Direct binding to the LLVM ld4 intrinsic for 2 x i64 per register.
    // The u64/p64 variants delegate here via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    // The existing registers in `b` supply the untouched lanes; only lane
    // LANE of each register is reloaded from memory.
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12686#[doc = "Load multiple 4-element structures to four registers"]
12687#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12688#[doc = "## Safety"]
12689#[doc = "  * Neon instrinsic unsafe"]
12690#[inline]
12691#[target_feature(enable = "neon")]
12692#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12693#[rustc_legacy_const_generics(2)]
12694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12695pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12696    static_assert_uimm_bits!(LANE, 3);
12697    unsafe extern "unadjusted" {
12698        #[cfg_attr(
12699            any(target_arch = "aarch64", target_arch = "arm64ec"),
12700            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12701        )]
12702        fn _vld4q_lane_s8(
12703            a: int8x16_t,
12704            b: int8x16_t,
12705            c: int8x16_t,
12706            d: int8x16_t,
12707            n: i64,
12708            ptr: *const i8,
12709        ) -> int8x16x4_t;
12710    }
12711    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12712}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    // The existing registers in `b` supply the untouched lanes; only lane
    // LANE of each register is reloaded from memory.
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // 2 lanes per register -> LANE in 0..=1; the signed variant repeats the
    // same assertion after the transmuting delegation.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // 16 lanes per register -> LANE in 0..=15 (4 bits); delegates to the
    // signed implementation via bit-preserving transmutes.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // 2 lanes per register -> LANE in 0..=1; delegates to the signed
    // implementation via bit-preserving transmutes.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // 16 lanes per register -> LANE in 0..=15 (4 bits); delegates to the
    // signed implementation via bit-preserving transmutes.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // poly64 shares i64's bit layout; delegate to the signed implementation.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: after the transmuting load, swap the two 64-bit
    // lanes of each register to restore lane order.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // u64 shares i64's bit layout; delegate to the signed implementation.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian variant: after the transmuting load, swap the two 64-bit
    // lanes of each register to restore lane order.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // LANE is range-checked here (0..=1) and forwarded verbatim to the LLVM
    // intrinsic.
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // LANE is range-checked here (0..=1) and forwarded verbatim to the LLVM
    // intrinsic.
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    // LANE is range-checked here (0..=3) and forwarded verbatim to the LLVM
    // intrinsic.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    // LANE is range-checked here (0..=3) and forwarded verbatim to the LLVM
    // intrinsic.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const LANE: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const LANE: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const LANE: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const LANE: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    // f16 and i16 share a 16-bit layout; reinterpret the table pair, run the
    // signed implementation, and reinterpret the result back.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    // Reinterpret the table pair, delegate to the signed implementation, and
    // reinterpret the result back; the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    // Reinterpret the table pair, delegate to the signed implementation, and
    // reinterpret the result back; the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
13073#[doc = "Lookup table read with 4-bit indices"]
13074#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13075#[doc = "## Safety"]
13076#[doc = "  * Neon instrinsic unsafe"]
13077#[inline]
13078#[target_feature(enable = "neon,lut")]
13079#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13080#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13081#[rustc_legacy_const_generics(2)]
13082pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13083    static_assert!(LANE >= 0 && LANE <= 1);
13084    unsafe extern "unadjusted" {
13085        #[cfg_attr(
13086            any(target_arch = "aarch64", target_arch = "arm64ec"),
13087            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13088        )]
13089        fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13090    }
13091    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13092}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Only LANE == 0 is valid for this form (the laneq variant accepts 0..=1).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti4q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Only LANE == 0 is valid; delegate to the signed implementation with the
    // table operand reinterpreted.
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Only LANE == 0 is valid; delegate to the signed implementation with the
    // table operand reinterpreted.
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    // f16 and i16 share a 16-bit layout; reinterpret the table pair, run the
    // signed implementation, and reinterpret the result back.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    // Reinterpret the table pair, delegate to the signed implementation, and
    // reinterpret the result back; the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    // Reinterpret the table pair, delegate to the signed implementation, and
    // reinterpret the result back; the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    // LANE is range-checked here (0..=3) and forwarded verbatim to the LLVM
    // intrinsic; the two halves of the table pair are passed separately.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // The laneq form takes a full 128-bit index vector, so LANE spans 0..=1
    // (vs. only 0 for the lane form).
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti4q_laneq_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // Same range as the signed variant it delegates to; only the table
    // operand is reinterpreted, the index vector `b` is passed as-is.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Declaration of the LLVM builtin; the "unadjusted" ABI passes vectors in
    // the exact layout the LLVM intrinsic expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the intrinsic only requires the `neon` target feature, which is
    // guaranteed by `#[target_feature]` on this function.
    unsafe { _vmax_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 128-bit (two f64 lane) variant of `vmax_f64`; thin wrapper over the
    // corresponding LLVM builtin.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vmaxq_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision maximum via the LLVM builtin (requires `fp16`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vmaxh_f16(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `simd_fmax` is the portable lane-wise max; assert_instr pins its lowering
    // to the FMAXNM instruction on this target.
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 128-bit (two f64 lane) variant; same portable lane-wise max.
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar case: `f16::max` already has maxNum semantics, and assert_instr
    // checks it lowers to a single FMAXNM.
    f16::max(a, b)
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    // Horizontal reduction over all four f16 lanes via the portable simd
    // reduction; expected to lower to a single FMAXNMV.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    // Eight-lane (128-bit) variant of the reduction above.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    // Two-lane reduction: lowers to the pairwise FMAXNMP rather than FMAXNMV,
    // since a single pairwise op reduces two lanes.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    // Likewise two lanes (f64x2), so the pairwise form is expected.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    // Four lanes: the full across-vector FMAXNMV instruction applies.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    // Uses the dedicated LLVM builtin (not the portable reduction) so the
    // FMAX (propagate-NaN) across-vector semantics are preserved.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    // Eight-lane (128-bit) variant of `vmaxv_f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    // Two-lane reduction via the LLVM builtin; expected to lower to a single
    // pairwise FMAXP.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    // Four-lane reduction; lowers to the across-vector FMAXV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    // Two f64 lanes: reduced by a single pairwise FMAXP.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vmaxvq_f64(a) }
}
// Integer horizontal-max family: each function reduces all lanes with the
// portable `simd_reduce_max`; `assert_instr` pins the expected AArch64
// lowering (SMAXV/UMAXV, or the pairwise SMAXP/UMAXP for two-lane vectors).
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    // Two lanes only, hence the pairwise SMAXP instead of SMAXV.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    // Two lanes only, hence the pairwise UMAXP instead of UMAXV.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Mirror of `vmax_f64`: thin wrapper over the LLVM FMIN builtin with the
    // "unadjusted" ABI LLVM expects for these vector intrinsics.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 128-bit (two f64 lane) variant of `vmin_f64`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision minimum via the LLVM builtin (requires `fp16`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vminh_f16(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `simd_fmin` is the portable lane-wise min; assert_instr pins its lowering
    // to the FMINNM instruction on this target.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 128-bit (two f64 lane) variant; same portable lane-wise min.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar case: `f16::min` already has minNum semantics, and assert_instr
    // checks it lowers to a single FMINNM.
    f16::min(a, b)
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    // Horizontal reduction over all four f16 lanes via the portable simd
    // reduction; expected to lower to a single FMINNMV.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    // Eight-lane (128-bit) variant of the reduction above.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    // Two-lane reduction: lowers to the pairwise FMINNMP rather than FMINNMV.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    // Likewise two lanes (f64x2), so the pairwise form is expected.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    // Four lanes: the full across-vector FMINNMV instruction applies.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    // Uses the dedicated LLVM builtin (not the portable reduction) so the
    // FMIN (propagate-NaN) across-vector semantics are preserved.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    // Eight-lane (128-bit) variant of `vminv_f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    // Two-lane reduction via the LLVM builtin; expected to lower to a single
    // pairwise FMINP.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    // Four-lane reduction; lowers to the across-vector FMINV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    // Two f64 lanes: reduced by a single pairwise FMINP.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vminvq_f64(a) }
}
// Integer horizontal-min family: each function reduces all lanes with the
// portable `simd_reduce_min`; `assert_instr` pins the expected AArch64
// lowering (SMINV/UMINV, or the pairwise SMINP/UMINP for two-lane vectors).
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    // Two lanes only, hence the pairwise SMINP instead of SMINV.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    // Two lanes only, hence the pairwise UMINP instead of UMINV.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Unfused a + (b * c): expressed as separate mul and add (assert_instr
    // checks for FMUL, i.e. this is not contracted into an FMA).
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // 128-bit (two f64 lane) variant of `vmla_f64`; same unfused mul+add.
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // LANE must index into the 4-lane `c` (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` to all 8 lanes, then delegate to the
        // widening multiply-add on the high halves (SMLAL2).
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // `laneq` variant: `c` has 8 lanes, so LANE may be 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` across all 8 lanes, then perform the
        // widening multiply-add on the high halves (SMLAL2).
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // `c` has 2 lanes, so the lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // `c` has 4 lanes, so the lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // `c` has 4 lanes, so the lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            // Broadcast lane LANE of `c` across all 8 lanes, then defer to
            // the vector form (which uses only the upper halves).
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // `c` has 8 lanes, so the lane index must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            // Broadcast lane LANE of `c` across all 8 lanes, then defer to
            // the vector form (which uses only the upper halves).
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // `c` has 2 lanes, so the lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // `c` has 4 lanes, so the lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
14105#[doc = "Multiply-add long"]
14106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
14107#[inline]
14108#[target_feature(enable = "neon")]
14109#[cfg_attr(test, assert_instr(smlal2))]
14110#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14111pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
14112    vmlal_high_s16(a, b, vdupq_n_s16(c))
14113}
14114#[doc = "Multiply-add long"]
14115#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
14116#[inline]
14117#[target_feature(enable = "neon")]
14118#[cfg_attr(test, assert_instr(smlal2))]
14119#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14120pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
14121    vmlal_high_s32(a, b, vdupq_n_s32(c))
14122}
14123#[doc = "Multiply-add long"]
14124#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
14125#[inline]
14126#[target_feature(enable = "neon")]
14127#[cfg_attr(test, assert_instr(umlal2))]
14128#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14129pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
14130    vmlal_high_u16(a, b, vdupq_n_u16(c))
14131}
14132#[doc = "Multiply-add long"]
14133#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
14134#[inline]
14135#[target_feature(enable = "neon")]
14136#[cfg_attr(test, assert_instr(umlal2))]
14137#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14138pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
14139    vmlal_high_u32(a, b, vdupq_n_u32(c))
14140}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the upper eight lanes of each operand, then do a
        // widening multiply-accumulate on those halves.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the upper four lanes of each operand, then do a
        // widening multiply-accumulate on those halves.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the upper two lanes of each operand, then do a
        // widening multiply-accumulate on those halves.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the upper eight lanes of each operand, then do a
        // widening multiply-accumulate on those halves.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the upper four lanes of each operand, then do a
        // widening multiply-accumulate on those halves.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the upper two lanes of each operand, then do a
        // widening multiply-accumulate on those halves.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
14219#[doc = "Floating-point multiply-subtract from accumulator"]
14220#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
14221#[inline]
14222#[target_feature(enable = "neon")]
14223#[cfg_attr(test, assert_instr(fmul))]
14224#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14225pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
14226    unsafe { simd_sub(a, simd_mul(b, c)) }
14227}
14228#[doc = "Floating-point multiply-subtract from accumulator"]
14229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
14230#[inline]
14231#[target_feature(enable = "neon")]
14232#[cfg_attr(test, assert_instr(fmul))]
14233#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14234pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
14235    unsafe { simd_sub(a, simd_mul(b, c)) }
14236}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // `c` has 4 lanes, so the lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            // Broadcast lane LANE of `c` across all 8 lanes, then defer to
            // the vector form (which uses only the upper halves).
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // `c` has 8 lanes, so the lane index must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            // Broadcast lane LANE of `c` across all 8 lanes, then defer to
            // the vector form (which uses only the upper halves).
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // `c` has 2 lanes, so the lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // `c` has 4 lanes, so the lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // `c` has 4 lanes, so the lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            // Broadcast lane LANE of `c` across all 8 lanes, then defer to
            // the vector form (which uses only the upper halves).
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // `c` has 8 lanes, so the lane index must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            // Broadcast lane LANE of `c` across all 8 lanes, then defer to
            // the vector form (which uses only the upper halves).
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // `c` has 2 lanes, so the lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // `c` has 4 lanes, so the lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            // Broadcast lane LANE of `c` across all 4 lanes for the vector form.
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
14449#[doc = "Multiply-subtract long"]
14450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
14451#[inline]
14452#[target_feature(enable = "neon")]
14453#[cfg_attr(test, assert_instr(smlsl2))]
14454#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14455pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
14456    vmlsl_high_s16(a, b, vdupq_n_s16(c))
14457}
14458#[doc = "Multiply-subtract long"]
14459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
14460#[inline]
14461#[target_feature(enable = "neon")]
14462#[cfg_attr(test, assert_instr(smlsl2))]
14463#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14464pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
14465    vmlsl_high_s32(a, b, vdupq_n_s32(c))
14466}
14467#[doc = "Multiply-subtract long"]
14468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
14469#[inline]
14470#[target_feature(enable = "neon")]
14471#[cfg_attr(test, assert_instr(umlsl2))]
14472#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14473pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
14474    vmlsl_high_u16(a, b, vdupq_n_u16(c))
14475}
14476#[doc = "Multiply-subtract long"]
14477#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
14478#[inline]
14479#[target_feature(enable = "neon")]
14480#[cfg_attr(test, assert_instr(umlsl2))]
14481#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14482pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
14483    vmlsl_high_u32(a, b, vdupq_n_u32(c))
14484}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the upper eight lanes of each operand, then do a
        // widening multiply-subtract on those halves.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the upper four lanes of each operand, then do a
        // widening multiply-subtract on those halves.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_s16(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the upper two lanes of each operand, then do a
        // widening multiply-subtract on those halves.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the upper eight lanes of each operand, then do a
        // widening multiply-subtract on those halves.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the upper four lanes of each operand, then do a
        // widening multiply-subtract on those halves.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the upper two lanes of each operand, then do a
        // widening multiply-subtract on those halves.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_u32(a, b, c)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the upper eight lanes, then widen them via the low-half form.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the upper four lanes, then widen them via the low-half form.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_s16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the upper two lanes, then widen them via the low-half form.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_s32(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the upper eight lanes, then widen them via the low-half form.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_u8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the upper four lanes, then widen them via the low-half form.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_u16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the upper two lanes, then widen them via the low-half form.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_u32(a)
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // Narrow each lane of `b`, then concatenate: `a` becomes the low
        // half and the narrowed lanes become the high half.
        let c: int8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe {
        // Narrow each lane of `b`, then concatenate: `a` becomes the low
        // half and the narrowed lanes become the high half.
        let c: int16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe {
        // Narrow each lane of `b`, then concatenate: `a` becomes the low
        // half and the narrowed lanes become the high half.
        let c: int32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        // Narrow each lane of `b`, then concatenate: `a` becomes the low
        // half and the narrowed lanes become the high half.
        let c: uint8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe {
        // Narrow each lane of `b`, then concatenate: `a` becomes the low
        // half and the narrowed lanes become the high half.
        let c: uint16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe {
        // Narrow each lane of `b`, then concatenate: `a` becomes the low
        // half and the narrowed lanes become the high half.
        let c: uint32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
14707#[doc = "Multiply"]
14708#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
14709#[inline]
14710#[target_feature(enable = "neon")]
14711#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14712#[cfg_attr(test, assert_instr(fmul))]
14713pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
14714    unsafe { simd_mul(a, b) }
14715}
14716#[doc = "Multiply"]
14717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
14718#[inline]
14719#[target_feature(enable = "neon")]
14720#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14721#[cfg_attr(test, assert_instr(fmul))]
14722pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
14723    unsafe { simd_mul(a, b) }
14724}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // A 1-lane vector only has lane 0, so that is the only valid index.
    static_assert!(LANE == 0);
    // Extract the selected lane of `b`, reinterpret the scalar as a
    // 1-lane vector, and multiply element-wise with `a`.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
// Multiply-by-lane family: the chosen lane of `b` is broadcast across a
// vector via `simd_shuffle!` (all indices == LANE) and then multiplied
// lane-wise with `a`. The shuffle shape is what selects the FMUL-by-element
// encoding checked by `assert_instr`.
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `b` has 8 lanes, so LANE needs 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        simd_mul(
            a,
            // Broadcast lane LANE of `b` into all 4 result lanes.
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        simd_mul(
            a,
            // Broadcast lane LANE of `b` into all 8 result lanes.
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // `b` has 2 lanes, so LANE needs 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extracted f64 is transmuted back to float64x1_t for the single-lane multiply.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
// Multiply-by-scalar: the scalar `b` is splatted into a vector with the
// matching `vdup(q)_n` helper and multiplied lane-wise.
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
// Scalar-by-lane multiply: pull lane LANE out of `b` (only lane 0 exists)
// and multiply with the scalar `a` in plain Rust; codegen selects FMUL.
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
14826#[doc = "Add"]
14827#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
14828#[inline]
14829#[target_feature(enable = "neon,fp16")]
14830#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14831#[cfg(not(target_arch = "arm64ec"))]
14832#[cfg_attr(test, assert_instr(fmul))]
14833pub fn vmulh_f16(a: f16, b: f16) -> f16 {
14834    a * b
14835}
// Scalar-by-lane f16 multiplies: extract lane LANE of the vector operand and
// multiply with the scalar `a`; codegen selects the FMUL-by-element form.
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // 4 lanes -> 2-bit lane index.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // 8 lanes -> 3-bit lane index.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
// Widening multiply (high half) by lane, SMULL2/UMULL2 family: lane LANE of
// `b` is broadcast across a full-width vector with `simd_shuffle!` (all
// indices == LANE), then the result is fed to the corresponding
// `vmull_high_*` intrinsic, which multiplies the upper half of `a` against
// it with widening. The `lane`/`laneq` suffix only changes the width of `b`
// and hence the allowed range of LANE in `static_assert_uimm_bits!`.
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_s16(
            a,
            // Broadcast lane LANE of the 4-lane `b` into all 8 lanes.
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
// Widening multiply (high half) by scalar: splat the scalar with `vdupq_n_*`
// and delegate to the vector `vmull_high_*` intrinsic.
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    vmull_high_u32(a, vdupq_n_u32(b))
}
// Polynomial widening multiply of the upper 64-bit lanes (PMULL2): extract
// lane 1 from each operand and delegate to the scalar vmull_p64. Requires
// the `aes` feature, which provides the PMULL instructions.
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
// Widening multiply of the high halves ({S,U,P}MULL2 family): a shuffle with
// indices [N/2 .. N-1] selects the upper half of each 128-bit operand, then
// the non-`high` vmull_* intrinsic performs the widening multiply.
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    unsafe {
        // Upper 8 of 16 lanes.
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Upper 4 of 8 lanes.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Upper 2 of 4 lanes.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
// 64x64 -> 128-bit carry-less (polynomial) multiply, PMULL. The LLVM
// intrinsic returns its 128-bit result as int8x16_t; it is transmuted to the
// p128 integer type callers expect (same size, plain bit reinterpretation).
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    // "unadjusted" ABI: arguments/returns are passed exactly as the LLVM
    // intrinsic declares them, with no Rust ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    unsafe { transmute(_vmull_p64(a, b)) }
}
// Multiply a two-lane f64 vector by a selected lane of `b`, broadcast via
// simd_shuffle (all indices == LANE).
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has one lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
// Scalar-by-lane multiplies: extract the lane and multiply as plain scalars.
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
// FMULX (multiply extended) family: each function binds the matching
// `llvm.aarch64.neon.fmulx.*` intrinsic via an `extern "unadjusted"`
// declaration and forwards its arguments. FMULX semantics (e.g. its special
// handling of 0 x infinity) come from the instruction itself — see the Arm
// documentation linked on each function.
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmulxq_f64(a, b) }
}
15360#[doc = "Floating-point multiply extended"]
15361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
15362#[inline]
15363#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15364#[rustc_legacy_const_generics(2)]
15365#[target_feature(enable = "neon,fp16")]
15366#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15367#[cfg(not(target_arch = "arm64ec"))]
15368pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15369    static_assert_uimm_bits!(LANE, 2);
15370    unsafe {
15371        vmulx_f16(
15372            a,
15373            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15374        )
15375    }
15376}
15377#[doc = "Floating-point multiply extended"]
15378#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
15379#[inline]
15380#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15381#[rustc_legacy_const_generics(2)]
15382#[target_feature(enable = "neon,fp16")]
15383#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15384#[cfg(not(target_arch = "arm64ec"))]
15385pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
15386    static_assert_uimm_bits!(LANE, 3);
15387    unsafe {
15388        vmulx_f16(
15389            a,
15390            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15391        )
15392    }
15393}
15394#[doc = "Floating-point multiply extended"]
15395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
15396#[inline]
15397#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15398#[rustc_legacy_const_generics(2)]
15399#[target_feature(enable = "neon,fp16")]
15400#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15401#[cfg(not(target_arch = "arm64ec"))]
15402pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
15403    static_assert_uimm_bits!(LANE, 2);
15404    unsafe {
15405        vmulxq_f16(
15406            a,
15407            simd_shuffle!(
15408                b,
15409                b,
15410                [
15411                    LANE as u32,
15412                    LANE as u32,
15413                    LANE as u32,
15414                    LANE as u32,
15415                    LANE as u32,
15416                    LANE as u32,
15417                    LANE as u32,
15418                    LANE as u32
15419                ]
15420            ),
15421        )
15422    }
15423}
15424#[doc = "Floating-point multiply extended"]
15425#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
15426#[inline]
15427#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15428#[rustc_legacy_const_generics(2)]
15429#[target_feature(enable = "neon,fp16")]
15430#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15431#[cfg(not(target_arch = "arm64ec"))]
15432pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15433    static_assert_uimm_bits!(LANE, 3);
15434    unsafe {
15435        vmulxq_f16(
15436            a,
15437            simd_shuffle!(
15438                b,
15439                b,
15440                [
15441                    LANE as u32,
15442                    LANE as u32,
15443                    LANE as u32,
15444                    LANE as u32,
15445                    LANE as u32,
15446                    LANE as u32,
15447                    LANE as u32,
15448                    LANE as u32
15449                ]
15450            ),
15451        )
15452    }
15453}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE must fit in 1 unsigned bit (0..=1); lane LANE of `b` is broadcast to
// both lanes before deferring to the full-vector FMULX.
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE selects from the 4-lane `b` (0..=3, 2 unsigned bits); the shuffle
// narrows to a 2-lane broadcast so the 64-bit FMULX variant can be used.
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE selects from the 2-lane `b` (0..=1); the shuffle widens it to a
// 4-lane broadcast to match the quad-width FMULX.
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE must be 0..=3 (2 unsigned bits); lane LANE of `b` is broadcast to all
// 4 lanes, then multiplied element-wise with `a` via FMULX.
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE must be 0..=1; lane LANE of `b` is broadcast to both f64 lanes before
// the full-vector FMULX.
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// `b` has a single lane, so LANE is constrained to exactly 0. The scalar f64
// is extracted and transmuted back into a 1-lane vector for vmulx_f64.
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE must be 0..=1; the selected f64 lane of the 2-lane `b` is extracted
// and transmuted into a 1-lane vector so the d-register FMULX applies.
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Splats the scalar `b` into all 4 lanes, then defers to the vector FMULX.
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Splats the scalar `b` into all 8 lanes, then defers to the vector FMULX.
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
// Scalar wrapper: binds `llvm.aarch64.neon.fmulx.f64` and forwards both
// operands unchanged.
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
// Scalar wrapper: binds `llvm.aarch64.neon.fmulx.f32` and forwards both
// operands unchanged.
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// `b` has one lane, so LANE must be 0; the extracted scalar feeds the
// scalar FMULX wrapper.
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE must be 0..=1; extracts that f64 lane and applies the scalar FMULX.
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE must be 0..=1; extracts that f32 lane and applies the scalar FMULX.
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// LANE must be 0..=3 (2 unsigned bits); extracts that f32 lane from the quad
// vector and applies the scalar FMULX.
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
// Scalar f16 wrapper: binds `llvm.aarch64.neon.fmulx.f16` and forwards both
// operands unchanged.
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// LANE must be 0..=3 (2 unsigned bits); extracts that f16 lane and applies
// the scalar FMULX.
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// LANE must be 0..=7 (3 unsigned bits); extracts that f16 lane from the quad
// vector and applies the scalar FMULX.
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// `b` has a single lane, so LANE must be 0; the shuffle widens it to a
// 2-lane broadcast for the quad-width FMULX.
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
// Lane-wise negation via the portable `simd_neg` intrinsic (lowers to FNEG).
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
// Lane-wise negation via the portable `simd_neg` intrinsic (lowers to FNEG).
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
// Lane-wise integer negation via the portable `simd_neg` intrinsic (NEG).
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
// Lane-wise integer negation via the portable `simd_neg` intrinsic (NEG).
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
// Wrapping negation matches the NEG instruction: i64::MIN maps to itself
// instead of panicking on overflow.
pub fn vnegd_s64(a: i64) -> i64 {
    a.wrapping_neg()
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
// Plain scalar negation; codegen is expected to emit FNEG (see assert_instr).
pub fn vnegh_f16(a: f16) -> f16 {
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
// Extracts both f64 lanes and adds them in scalar code; no dedicated
// instruction is expected (assert_instr checks for `nop`).
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
// Extracts both f32 lanes and adds them in scalar code; no dedicated
// instruction is expected (assert_instr checks for `nop`).
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Ordered lane-sum of both i64 lanes with initial accumulator 0 (ADDP).
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Ordered lane-sum of both u64 lanes with initial accumulator 0 (ADDP).
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
// Binds `llvm.aarch64.neon.faddp.v8f16` (FADDP) and forwards both operands.
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v8f16"
        )]
        fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpaddq_f16(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
// Binds `llvm.aarch64.neon.faddp.v4f32` (FADDP) and forwards both operands.
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v4f32"
        )]
        fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpaddq_f32(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
// Binds `llvm.aarch64.neon.faddp.v2f64` (FADDP) and forwards both operands.
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v2f64"
        )]
        fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpaddq_f64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Binds `llvm.aarch64.neon.addp.v16i8` (ADDP) and forwards both operands.
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v16i8"
        )]
        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpaddq_s8(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Binds `llvm.aarch64.neon.addp.v8i16` (ADDP) and forwards both operands.
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v8i16"
        )]
        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpaddq_s16(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Binds `llvm.aarch64.neon.addp.v4i32` (ADDP) and forwards both operands.
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v4i32"
        )]
        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpaddq_s32(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Binds `llvm.aarch64.neon.addp.v2i64` (ADDP) and forwards both operands.
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v2i64"
        )]
        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
    }
    unsafe { _vpaddq_s64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Little-endian path: bit-identical delegation to the signed variant.
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Big-endian path: lane order is reversed before and after delegating to the
// signed variant so the transmute-based call sees lanes in the expected order.
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Little-endian path: bit-identical delegation to the signed variant.
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Big-endian path: lane order is reversed before and after delegating to the
// signed variant so the transmute-based call sees lanes in the expected order.
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Little-endian path: bit-identical delegation to the signed variant.
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Big-endian path: lane order is reversed before and after delegating to the
// signed variant so the transmute-based call sees lanes in the expected order.
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Little-endian path: bit-identical delegation to the signed variant.
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
// Big-endian path: lane order is reversed before and after delegating to the
// signed variant so the transmute-based call sees lanes in the expected order.
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
16007#[doc = "Floating-point add pairwise"]
16008#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
16009#[inline]
16010#[target_feature(enable = "neon,fp16")]
16011#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16012#[cfg(not(target_arch = "arm64ec"))]
16013#[cfg_attr(test, assert_instr(fmaxp))]
16014pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16015    unsafe extern "unadjusted" {
16016        #[cfg_attr(
16017            any(target_arch = "aarch64", target_arch = "arm64ec"),
16018            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
16019        )]
16020        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16021    }
16022    unsafe { _vpmax_f16(a, b) }
16023}
16024#[doc = "Floating-point add pairwise"]
16025#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
16026#[inline]
16027#[target_feature(enable = "neon,fp16")]
16028#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16029#[cfg(not(target_arch = "arm64ec"))]
16030#[cfg_attr(test, assert_instr(fmaxp))]
16031pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16032    unsafe extern "unadjusted" {
16033        #[cfg_attr(
16034            any(target_arch = "aarch64", target_arch = "arm64ec"),
16035            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
16036        )]
16037        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16038    }
16039    unsafe { _vpmaxq_f16(a, b) }
16040}
16041#[doc = "Floating-point add pairwise"]
16042#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
16043#[inline]
16044#[target_feature(enable = "neon,fp16")]
16045#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16046#[cfg(not(target_arch = "arm64ec"))]
16047#[cfg_attr(test, assert_instr(fmaxnmp))]
16048pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16049    unsafe extern "unadjusted" {
16050        #[cfg_attr(
16051            any(target_arch = "aarch64", target_arch = "arm64ec"),
16052            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
16053        )]
16054        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16055    }
16056    unsafe { _vpmaxnm_f16(a, b) }
16057}
16058#[doc = "Floating-point add pairwise"]
16059#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
16060#[inline]
16061#[target_feature(enable = "neon,fp16")]
16062#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16063#[cfg(not(target_arch = "arm64ec"))]
16064#[cfg_attr(test, assert_instr(fmaxnmp))]
16065pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16066    unsafe extern "unadjusted" {
16067        #[cfg_attr(
16068            any(target_arch = "aarch64", target_arch = "arm64ec"),
16069            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
16070        )]
16071        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16072    }
16073    unsafe { _vpmaxnmq_f16(a, b) }
16074}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Binds `llvm.aarch64.neon.fmaxnmp.v2f32` (FMAXNMP) and forwards both operands.
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Binds `llvm.aarch64.neon.fmaxnmp.v4f32` (FMAXNMP) and forwards both operands.
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Binds `llvm.aarch64.neon.fmaxnmp.v2f64` (FMAXNMP) and forwards both operands.
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Horizontal reduction: binds `llvm.aarch64.neon.fmaxnmv.f64.v2f64` and
// returns a scalar from the 2-lane input.
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Horizontal reduction: binds `llvm.aarch64.neon.fmaxnmv.f32.v2f32` and
// returns a scalar from the 2-lane input.
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxnms_f32(a) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to the FMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Binding to the LLVM intrinsic that lowers to the FMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Binding to the LLVM intrinsic that lowers to the SMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Binding to the LLVM intrinsic that lowers to the SMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Binding to the LLVM intrinsic that lowers to the SMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Binding to the LLVM intrinsic that lowers to the UMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Binding to the LLVM intrinsic that lowers to the UMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Binding to the LLVM intrinsic that lowers to the UMAXP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    // For a 2-lane vector, the pairwise result is the across-vector reduction,
    // so this binds the `fmaxv` reduction intrinsic and returns a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    // For a 2-lane vector, the pairwise result is the across-vector reduction,
    // so this binds the `fmaxv` reduction intrinsic and returns a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vpmaxs_f32(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): the generated doc string said "add pairwise", but this binds
    // `fminp` (pairwise minimum); the doc above was corrected. Fix belongs in
    // crates/stdarch-gen-arm/spec/ as well, since this file is regenerated.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f16"
        )]
        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpmin_f16(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): the generated doc string said "add pairwise", but this binds
    // `fminp` (pairwise minimum); the doc above was corrected. Fix belongs in
    // crates/stdarch-gen-arm/spec/ as well, since this file is regenerated.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v8f16"
        )]
        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): the generated doc string said "add pairwise", but this binds
    // `fminnmp` (pairwise minimum number); doc corrected to match the f32/f64
    // siblings. Fix belongs in crates/stdarch-gen-arm/spec/ as well.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
        )]
        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminnm_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): the generated doc string said "add pairwise", but this binds
    // `fminnmp` (pairwise minimum number); doc corrected to match the f32/f64
    // siblings. Fix belongs in crates/stdarch-gen-arm/spec/ as well.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
        )]
        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminnmq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to the FMINNMP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to the FMINNMP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Binding to the LLVM intrinsic that lowers to the FMINNMP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    // For a 2-lane vector, the pairwise result is the across-vector reduction,
    // so this binds the `fminnmv` reduction intrinsic and returns a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    // For a 2-lane vector, the pairwise result is the across-vector reduction,
    // so this binds the `fminnmv` reduction intrinsic and returns a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to the FMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Binding to the LLVM intrinsic that lowers to the FMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Binding to the LLVM intrinsic that lowers to the SMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Binding to the LLVM intrinsic that lowers to the SMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Binding to the LLVM intrinsic that lowers to the SMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Binding to the LLVM intrinsic that lowers to the UMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Binding to the LLVM intrinsic that lowers to the UMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Binding to the LLVM intrinsic that lowers to the UMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD arguments.
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    // For a 2-lane vector, the pairwise result is the across-vector reduction,
    // so this binds the `fminv` reduction intrinsic and returns a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    // For a 2-lane vector, the pairwise result is the across-vector reduction,
    // so this binds the `fminv` reduction intrinsic and returns a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vpmins_f32(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    // Binding to the LLVM intrinsic that lowers to the SQABS instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    // Binding to the LLVM intrinsic that lowers to the SQABS instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value SIMD argument.
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    // Scalar form: splat `a` into a vector, apply the vector saturating abs,
    // then read back lane 0.
    // SAFETY: lane index 0 is always in bounds for an 8-lane vector.
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    // Scalar form: splat `a` into a vector, apply the vector saturating abs,
    // then read back lane 0.
    // SAFETY: lane index 0 is always in bounds for a 4-lane vector.
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    // Binding to the scalar (i32) form of the LLVM SQABS intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value argument.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    // Binding to the scalar (i64) form of the LLVM SQABS intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value argument.
    unsafe { _vqabsd_s64(a) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands, use the vector saturating add, read lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane index 0 is always in bounds for an 8-lane vector.
    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, use the vector saturating add, read lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane index 0 is always in bounds for a 4-lane vector.
    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
    // Scalar form: splat both operands, use the vector saturating add, read lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    // SAFETY: lane index 0 is always in bounds for an 8-lane vector.
    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
    // Scalar form: splat both operands, use the vector saturating add, read lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    // SAFETY: lane index 0 is always in bounds for a 4-lane vector.
    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Binding to the scalar (i32) form of the LLVM SQADD intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value arguments.
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Binding to the scalar (i64) form of the LLVM SQADD intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value arguments.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Binding to the scalar (i32) form of the LLVM UQADD intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value arguments.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Binding to the scalar (i64) form of the LLVM UQADD intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: the intrinsic is a pure computation on its by-value arguments.
    unsafe { _vqaddd_u64(a, b) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // `N` selects a lane of `c`; 2 bits allow lanes 0..=3 of the 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // Widening doubling multiply of the selected lane, then saturating add to `a`.
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // `N` selects a lane of `c`; 3 bits allow lanes 0..=7 of the 8-lane vector.
    static_assert_uimm_bits!(N, 3);
    // Widening doubling multiply of the selected lane, then saturating add to `a`.
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // `N` selects a lane of `c`; 1 bit allows lanes 0..=1 of the 2-lane vector.
    static_assert_uimm_bits!(N, 1);
    // Widening doubling multiply of the selected lane, then saturating add to `a`.
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    // SQDMLAL2: doubled widening product of the upper half of `b` with
    // lane N of `c`, saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Broadcast-scalar form: doubled widening product of the upper half of
    // `b` with `c`, saturating-added into `a`.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubled widening product of the upper halves of `b` and `c`,
    // saturating-added into `a`.
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Broadcast-scalar form: doubled widening product of the upper half of
    // `b` with `c`, saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubled widening product of the upper halves of `b` and `c`,
    // saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N indexes one of `c`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    // SQDMLAL: doubled widening product of `b` with lane N of `c`,
    // saturating-added into `a`.
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    // SQDMLAL: doubled widening product of `b` with lane N of `c`,
    // saturating-added into `a`.
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE indexes one of `c`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE indexes one of `c`'s 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Compute the doubled widening product via a splat-then-multiply in a
    // vector register; only lane 0 of `x` is used.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // Saturating i32 add of that product into `a`.
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    // Saturating i64 add of the doubled widening product b*c into `a`,
    // built from the dedicated scalar helpers.
    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    // SQDMLSL2: doubled widening product of the upper half of `b` with
    // lane N of `c`, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N indexes one of `c`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    // SQDMLSL2: doubled widening product of the upper half of `b` with
    // lane N of `c`, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N indexes one of `c`'s 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    // SQDMLSL2: doubled widening product of the upper half of `b` with
    // lane N of `c`, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    // SQDMLSL2: doubled widening product of the upper half of `b` with
    // lane N of `c`, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Broadcast-scalar form: doubled widening product of the upper half of
    // `b` with `c`, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubled widening product of the upper halves of `b` and `c`,
    // saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Broadcast-scalar form: doubled widening product of the upper half of
    // `b` with `c`, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubled widening product of the upper halves of `b` and `c`,
    // saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N indexes one of `c`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    // SQDMLSL: doubled widening product of `b` with lane N of `c`,
    // saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    // SQDMLSL: doubled widening product of `b` with lane N of `c`,
    // saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE indexes one of `c`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE indexes one of `c`'s 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE indexes one of `c`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Compute the doubled widening product via a splat-then-multiply in a
    // vector register; only lane 0 of `x` is used.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // Saturating i32 subtract of that product from `a`.
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    // Saturating i64 subtract of the doubled widening product b*c from `a`,
    // built from the dedicated scalar helpers.
    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // LANE indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-by-vector form.
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // LANE indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` to 8 lanes and reuse the full-width form.
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE indexes one of `b`'s 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `b` and reuse the vector-by-vector form.
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // LANE indexes one of `b`'s 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `b` to 4 lanes and reuse the full-width form.
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    // N indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    // N indexes one of `b`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars into 4-lane vectors, run the vector SQDMULH, and
    // read back lane 0 (all lanes hold the same value).
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // Splat both scalars into 2-lane vectors, run the vector SQDMULH, and
    // read back lane 0 (both lanes hold the same value).
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    // N indexes one of `b`'s 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    // N indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // N indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Take the upper half of `a` (lanes 4..8) and splat lane N of `b`,
        // then perform the plain widening doubling multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // N indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Take the upper half of `a` (lanes 2..4) and splat lane N of `b`,
        // then perform the plain widening doubling multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // N indexes one of `b`'s 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Take the upper half of `a` (lanes 2..4) and splat lane N of `b`,
        // then perform the plain widening doubling multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // N indexes one of `b`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Take the upper half of `a` (lanes 4..8) and splat lane N of `b`,
        // then perform the plain widening doubling multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        // Take the upper half of `a` (lanes 4..8) and broadcast scalar `b`,
        // then perform the plain widening doubling multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        // Take the upper half of `a` (lanes 2..4) and broadcast scalar `b`,
        // then perform the plain widening doubling multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Take the upper halves (lanes 4..8) of both inputs, then perform
        // the plain widening doubling multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Take the upper halves (lanes 2..4) of both inputs, then perform
        // the plain widening doubling multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // N indexes one of `b`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Splat lane N of `b` to 4 lanes, then do the widening doubling
        // multiply against `a`.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // N indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Splat lane N of `b` to 2 lanes, then do the widening doubling
        // multiply against `a`.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // N indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // N indexes one of `b`'s 4 lanes, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // N indexes one of `b`'s 8 lanes, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Extract the selected lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // Splat both scalars into 4-lane vectors, run the vector SQDMULL, and
    // read back lane 0 (all lanes hold the same value).
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // Compile-time check: N must fit in 1 bit, i.e. index one of the 2 lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // SAFETY: N is a valid lane index, enforced by the assert above.
        let b: i32 = simd_extract!(b, N as u32);
        // Delegate to the plain scalar intrinsic with the extracted lane.
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // Declaration of the raw LLVM intrinsic this function forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqdmulls_s32(a, b) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Concatenate: lanes 0..8 come from `a`, lanes 8..16 from the
    // saturating-narrowed `b` (sqxtn2 writes the upper half).
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Lanes 0..4 from `a`, lanes 4..8 from the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Lanes 0..2 from `a`, lanes 2..4 from the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // NOTE(review): the doc summary says "Signed" but this is the unsigned
    // variant (uqxtn2) — likely a generator-spec typo; fix in stdarch-gen-arm.
    // Lanes 0..8 from `a`, lanes 8..16 from the saturating-narrowed `b`.
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // NOTE(review): doc summary says "Signed" but this is the unsigned
    // variant (uqxtn2) — likely a generator-spec typo.
    // Lanes 0..4 from `a`, lanes 4..8 from the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // NOTE(review): doc summary says "Signed" but this is the unsigned
    // variant (uqxtn2) — likely a generator-spec typo.
    // Lanes 0..2 from `a`, lanes 2..4 from the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    // Declaration of the raw LLVM scalar intrinsic this forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    // Declaration of the raw LLVM scalar intrinsic this forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    // Broadcast to a vector, narrow with the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the narrowed result.
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    // Broadcast to a vector, narrow with the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the narrowed result.
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    // Broadcast to a vector, narrow with the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the narrowed result.
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    // Broadcast to a vector, narrow with the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the narrowed result.
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Lanes 0..8 from `a`, lanes 8..16 from the signed-to-unsigned
    // saturating-narrowed `b` (sqxtun2 writes the upper half).
    unsafe {
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Lanes 0..4 from `a`, lanes 4..8 from the unsigned-saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Lanes 0..2 from `a`, lanes 2..4 from the unsigned-saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    // Broadcast to a vector, narrow with the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the narrowed result.
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    // Broadcast to a vector, narrow with the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the narrowed result.
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    // Broadcast to a vector, narrow with the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the narrowed result.
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Declaration of the raw LLVM intrinsic this function forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Declaration of the raw LLVM intrinsic this function forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    // Broadcast the scalar, negate via the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the result vector.
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    // Broadcast the scalar, negate via the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the result vector.
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    // Broadcast the scalar, negate via the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the result vector.
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    // Broadcast the scalar, negate via the vector intrinsic, take lane 0.
    // SAFETY: lane 0 always exists in the result vector.
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of `c` across all lanes, then delegate
        // to the full-vector multiply-accumulate.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Compile-time check: LANE must index one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Splat the selected lane of `c`, then delegate to the vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // Compile-time check: LANE must index one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Splat the selected lane of the 8-lane `c` into a 4-lane vector,
        // then delegate to the 64-bit-vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of the 4-lane `c` into a 2-lane vector,
        // then delegate to the 64-bit-vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of the 4-lane `c` across an 8-lane vector,
        // then delegate to the full-vector multiply-accumulate.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // Compile-time check: LANE must index one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Splat the selected lane of the 2-lane `c` across a 4-lane vector,
        // then delegate to the full-vector multiply-accumulate.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Compile-time check: LANE must index one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Splat the selected lane of `c` across all 8 lanes, then delegate
        // to the full-vector multiply-accumulate.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of `c` across all 4 lanes, then delegate
        // to the full-vector multiply-accumulate.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Declaration of the raw LLVM intrinsic this function forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Declaration of the raw LLVM intrinsic this function forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Declaration of the raw LLVM intrinsic this function forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Declaration of the raw LLVM intrinsic this function forwards to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic's signature.
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is a valid lane index, enforced by the assert above.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // Compile-time check: LANE must index one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is a valid lane index, enforced by the assert above.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // Compile-time check: LANE must index one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is a valid lane index, enforced by the assert above.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is a valid lane index, enforced by the assert above.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Broadcast all three scalars, run the vector sqrdmlah, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    // SAFETY: lane 0 always exists in the result vector.
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Broadcast all three scalars, run the vector sqrdmlah, take lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    // SAFETY: lane 0 always exists in the result vector.
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of `c`, then delegate to the vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Compile-time check: LANE must index one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Splat the selected lane of `c`, then delegate to the vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // Compile-time check: LANE must index one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Splat the selected lane of the 8-lane `c` into a 4-lane vector,
        // then delegate to the 64-bit-vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of the 4-lane `c` into a 2-lane vector,
        // then delegate to the 64-bit-vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // Compile-time check: LANE must index one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of the 4-lane `c` across an 8-lane vector,
        // then delegate to the full-vector multiply-subtract.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // Compile-time check: LANE must index one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Splat the selected lane of the 2-lane `c` across a 4-lane vector,
        // then delegate to the full-vector multiply-subtract.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
18126#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18127#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
18128#[inline]
18129#[target_feature(enable = "rdm")]
18130#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18131#[rustc_legacy_const_generics(3)]
18132#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18133pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
18134    static_assert_uimm_bits!(LANE, 3);
18135    unsafe {
18136        let c: int16x8_t = simd_shuffle!(
18137            c,
18138            c,
18139            [
18140                LANE as u32,
18141                LANE as u32,
18142                LANE as u32,
18143                LANE as u32,
18144                LANE as u32,
18145                LANE as u32,
18146                LANE as u32,
18147                LANE as u32
18148            ]
18149        );
18150        vqrdmlshq_s16(a, b, c)
18151    }
18152}
18153#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18154#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
18155#[inline]
18156#[target_feature(enable = "rdm")]
18157#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
18158#[rustc_legacy_const_generics(3)]
18159#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18160pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
18161    static_assert_uimm_bits!(LANE, 2);
18162    unsafe {
18163        let c: int32x4_t =
18164            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
18165        vqrdmlshq_s32(a, b, c)
18166    }
18167}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Direct binding to the LLVM intrinsic; arguments are forwarded unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Direct binding to the LLVM intrinsic; arguments are forwarded unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Direct binding to the LLVM intrinsic; arguments are forwarded unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Direct binding to the LLVM intrinsic; arguments are forwarded unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: splat each operand into a 4-lane vector, run the vector
    // intrinsic, then take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form: splat each operand into a 2-lane vector, run the vector
    // intrinsic, then take lane 0 of the result.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the all-scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, run the 4-lane vector intrinsic,
    // then take lane 0 of the result.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar form: splat both operands, run the 2-lane vector intrinsic,
    // then take lane 0 of the result.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands into 8-lane vectors, run the vector
    // intrinsic, then take lane 0 of the result.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands into 4-lane vectors, run the vector
    // intrinsic, then take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Scalar form: unsigned value, signed shift amount (negative = right).
    // Splat both into 8-lane vectors, shift, then take lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Scalar form: unsigned value, signed shift amount (negative = right).
    // Splat both into 4-lane vectors, shift, then take lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // Direct binding to the scalar (i64) LLVM intrinsic; no vector round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // Direct binding to the scalar (i32) LLVM intrinsic; no vector round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Direct binding to the scalar LLVM intrinsic; unsigned value with a
    // signed shift amount.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Direct binding to the scalar LLVM intrinsic; unsigned value with a
    // signed shift amount.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount for a 16-bit source must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by N, then concatenate: lanes 0..=7 come from `a`,
        // lanes 8..=15 from the narrowed `b`.
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount for a 32-bit source must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
    // `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount for a 64-bit source must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
    // `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount for a 16-bit source must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by N, then concatenate: lanes 0..=7 come from `a`,
        // lanes 8..=15 from the narrowed `b`.
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount for a 32-bit source must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
    // `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount for a 64-bit source must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
    // `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Shift amount for a 64-bit source must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Shift amount for a 16-bit source must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Shift amount for a 32-bit source must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Shift amount for a 16-bit source must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Shift amount for a 32-bit source must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Shift amount for a 64-bit source must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift amount for a 16-bit source must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by N, then concatenate: lanes 0..=7 come from `a`,
        // lanes 8..=15 from the narrowed `b`.
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount for a 32-bit source must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
    // `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount for a 64-bit source must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
    // `b` (high half).
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount for a 64-bit source must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount for a 16-bit source must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount for a 32-bit source must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Scalar form: splat, run the vector narrow, then take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
18695#[doc = "Signed saturating shift left"]
18696#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
18697#[inline]
18698#[target_feature(enable = "neon")]
18699#[cfg_attr(test, assert_instr(sqshl, N = 2))]
18700#[rustc_legacy_const_generics(1)]
18701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18702pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
18703    static_assert_uimm_bits!(N, 3);
18704    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
18705}
18706#[doc = "Signed saturating shift left"]
18707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
18708#[inline]
18709#[target_feature(enable = "neon")]
18710#[cfg_attr(test, assert_instr(sqshl, N = 2))]
18711#[rustc_legacy_const_generics(1)]
18712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18713pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
18714    static_assert_uimm_bits!(N, 6);
18715    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
18716}
18717#[doc = "Signed saturating shift left"]
18718#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
18719#[inline]
18720#[target_feature(enable = "neon")]
18721#[cfg_attr(test, assert_instr(sqshl, N = 2))]
18722#[rustc_legacy_const_generics(1)]
18723#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18724pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
18725    static_assert_uimm_bits!(N, 4);
18726    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
18727}
18728#[doc = "Signed saturating shift left"]
18729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
18730#[inline]
18731#[target_feature(enable = "neon")]
18732#[cfg_attr(test, assert_instr(sqshl, N = 2))]
18733#[rustc_legacy_const_generics(1)]
18734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18735pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
18736    static_assert_uimm_bits!(N, 5);
18737    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
18738}
18739#[doc = "Unsigned saturating shift left"]
18740#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
18741#[inline]
18742#[target_feature(enable = "neon")]
18743#[cfg_attr(test, assert_instr(uqshl, N = 2))]
18744#[rustc_legacy_const_generics(1)]
18745#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18746pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
18747    static_assert_uimm_bits!(N, 3);
18748    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
18749}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    // Immediate must fit in 6 bits (0..=63) for a 64-bit lane.
    static_assert_uimm_bits!(N, 6);
    // Splat to a 1-lane vector, shift with saturation, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    // Immediate must fit in 4 bits (0..=15) for a 16-bit lane.
    static_assert_uimm_bits!(N, 4);
    // Splat to a 4-lane vector, shift with saturation, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    // Immediate must fit in 5 bits (0..=31) for a 32-bit lane.
    static_assert_uimm_bits!(N, 5);
    // Splat to a 2-lane vector, shift with saturation, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Variable (register) shift amount: splat both scalars into 8-lane
    // vectors, run the vector op, and return lane 0 (all lanes are equal).
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars into 4-lane vectors, run the vector op, read lane 0.
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    // Splat both scalars into 2-lane vectors, run the vector op, read lane 0.
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // Note the shift amount `b` is signed even for the unsigned op (negative
    // values shift right per the UQSHL register form). Splat, shift, extract.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    // Shift amount is signed (i16); splat both operands, shift, extract lane 0.
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    // Shift amount is signed (i32); splat both operands, shift, extract lane 0.
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // A scalar (i64) LLVM intrinsic exists for this case, so bind it directly
    // instead of going through a vector splat/extract round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    // Direct binding to the scalar LLVM intrinsic; the shift amount `b`
    // stays signed (i64) as in the other variable-shift forms.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    // Signed input, unsigned (saturated) output. Immediate fits in 3 bits.
    static_assert_uimm_bits!(N, 3);
    // Splat to an 8-lane vector, shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    // Signed input, unsigned (saturated) output. Immediate fits in 6 bits.
    static_assert_uimm_bits!(N, 6);
    // Splat to a 1-lane vector, shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    // Signed input, unsigned (saturated) output. Immediate fits in 4 bits.
    static_assert_uimm_bits!(N, 4);
    // Splat to a 4-lane vector, shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    // Signed input, unsigned (saturated) output. Immediate fits in 5 bits.
    static_assert_uimm_bits!(N, 5);
    // Splat to a 2-lane vector, shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Narrowing shifts require 1..=8 for 16-bit source lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 x i8, then concatenate: low half = `a`,
        // high half = narrowed `b` (the "2"/high form of SQSHRN).
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Narrowing shifts require 1..=16 for 32-bit source lanes.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Narrowing shifts require 1..=32 for 64-bit source lanes.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Narrowing shifts require 1..=8 for 16-bit source lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Concatenate `a` (low half) with the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Narrowing shifts require 1..=16 for 32-bit source lanes.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Narrowing shifts require 1..=32 for 64-bit source lanes.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Narrowing a 64-bit scalar to 32 bits: shift must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // The scalar LLVM intrinsic takes the shift amount as a runtime argument,
    // so the const generic is forwarded as a plain i32 value below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Narrowing a 64-bit scalar to 32 bits: shift must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Direct scalar intrinsic binding; const N is forwarded as a runtime arg.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Narrowing 16 -> 8 bits: shift must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat to a 128-bit vector (vdupq), narrow it, read lane 0.
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Narrowing 32 -> 16 bits: shift must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat to a 128-bit vector, narrow it, read lane 0.
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Narrowing 16 -> 8 bits: shift must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat to a 128-bit vector, narrow it, read lane 0.
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Narrowing 32 -> 16 bits: shift must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat to a 128-bit vector, narrow it, read lane 0.
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Signed input narrowed to unsigned output; shift must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Concatenate `a` (low half) with the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Signed input narrowed to unsigned output; shift must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Signed input narrowed to unsigned output; shift must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Narrowing 64 -> 32 bits with unsigned saturation; shift must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Splat to a 128-bit vector, narrow it, read lane 0.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Narrowing 16 -> 8 bits with unsigned saturation; shift must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat to a 128-bit vector, narrow it, read lane 0.
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Narrowing 32 -> 16 bits with unsigned saturation; shift must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat to a 128-bit vector, narrow it, read lane 0.
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // No 8-bit scalar form: splat both scalars into 8-lane vectors,
    // subtract with saturation, and return lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars into 4-lane vectors, subtract with saturation,
    // and return lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Splat both scalars into 8-lane vectors, subtract with saturation,
    // and return lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Splat both scalars into 4-lane vectors, subtract with saturation,
    // and return lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // 32-bit scalar form exists in LLVM, so bind it directly (no vector trip).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    // Direct binding to the 64-bit scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    // Direct binding to the 32-bit scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    // Direct binding to the 64-bit scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Private (non-pub) helper binding the TBL1 LLVM intrinsic on the signed
    // element type; the typed public wrappers (vqtbl1_s8/u8/p8) transmute
    // their table into this form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Private helper: 128-bit-index variant of vqtbl1 (16 result lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Signed variant: already matches the helper's types, no transmute needed.
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Signed 128-bit variant: direct pass-through to the helper.
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    // Bit-reinterpret the unsigned table as signed for the shared helper;
    // table look-up is bit-pattern-only, so this is value-preserving.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Reinterpret through the signed helper; the byte lookup is type-agnostic.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant: reinterpret through the signed helper.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // Polynomial 128-bit variant: reinterpret through the signed helper.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private helper for TBL2: two 16-byte table registers (`a`, `b`) are
    // passed as separate arguments to the LLVM intrinsic; `c` holds indices.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private helper: 128-bit-index variant of vqtbl2 (16 result lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table tuple into the helper's flat arguments.
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the two-register table tuple into the helper's flat arguments.
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // Little-endian: lane order already matches the intrinsic semantics,
    // so just reinterpret the unsigned tables through the signed helper.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: register lane order is reversed relative to the intrinsic's
    // little-endian semantics. Reverse each input, perform the look-up, then
    // reverse the result back so callers see consistent lane numbering.
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    // Index vector is 8 lanes wide, so it gets its own 8-element reversal.
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // Reinterpret the unsigned table halves as signed for the helper, then
    // transmute the signed result back to unsigned.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian target: lane order of both table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // Reinterpret the polynomial table halves as signed for the helper, then
    // transmute the signed result back to polynomial.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian target: lane order of both table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // Reinterpret the polynomial table halves as signed for the helper, then
    // transmute the signed result back to polynomial.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian target: lane order of both table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding for `llvm.aarch64.neon.tbl3.v8i8`: three 16-byte table
    // registers and an 8-byte index vector, producing an 8-byte result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private binding for `llvm.aarch64.neon.tbl3.v16i8`: three 16-byte table
    // registers and a 16-byte index vector, producing a 16-byte result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    // Split the three-register table into its parts and forward to `vqtbl3`.
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    // Split the three-register table into its parts and forward to `vqtbl3q`.
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Reinterpret the unsigned table registers as signed for the helper, then
    // transmute the signed result back to unsigned.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian target: lane order of all three table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // Reinterpret the unsigned table registers as signed for the helper, then
    // transmute the signed result back to unsigned.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian target: lane order of all three table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // Reinterpret the polynomial table registers as signed for the helper,
    // then transmute the signed result back to polynomial.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian target: lane order of all three table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // Reinterpret the polynomial table registers as signed for the helper,
    // then transmute the signed result back to polynomial.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian target: lane order of all three table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private binding for `llvm.aarch64.neon.tbl4.v8i8`: four 16-byte table
    // registers and an 8-byte index vector, producing an 8-byte result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private binding for `llvm.aarch64.neon.tbl4.v16i8`: four 16-byte table
    // registers and a 16-byte index vector, producing a 16-byte result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    // Split the four-register table into its parts and forward to `vqtbl4`.
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    // Split the four-register table into its parts and forward to `vqtbl4q`.
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Reinterpret the unsigned table registers as signed for the helper, then
    // transmute the signed result back to unsigned.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian target: lane order of all four table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // Reinterpret the unsigned table registers as signed for the helper, then
    // transmute the signed result back to unsigned.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian target: lane order of all four table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // Reinterpret the polynomial table registers as signed for the helper,
    // then transmute the signed result back to polynomial.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian target: lane order of all four table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // Reinterpret the polynomial table registers as signed for the helper,
    // then transmute the signed result back to polynomial.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian target: lane order of all four table registers, the index
    // vector, and the returned vector is reversed around the call.
    // NOTE(review): presumably this presents the little-endian lane layout
    // the underlying intrinsic expects — confirm against stdarch-gen-arm.
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Restore the caller-visible big-endian lane order on the result.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private binding for `llvm.aarch64.neon.tbx1.v8i8`: fallback vector `a`,
    // one 16-byte table register `b`, and 8-byte index vector `c`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private binding for `llvm.aarch64.neon.tbx1.v16i8`: fallback vector
    // `a`, one 16-byte table register `b`, and 16-byte index vector `c`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Direct pass-through to the private intrinsic wrapper.
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Direct pass-through to the private intrinsic wrapper.
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    // Reinterpret the unsigned fallback and table as signed for the helper,
    // then transmute the signed result back to unsigned.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Reinterpret the unsigned fallback and table as signed for the helper,
    // then transmute the signed result back to unsigned.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    // Reinterpret the polynomial fallback and table as signed for the helper,
    // then transmute the signed result back to polynomial.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    // Reinterpret the polynomial fallback and table as signed for the helper,
    // then transmute the signed result back to polynomial.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding for `llvm.aarch64.neon.tbx2.v8i8`: fallback vector `a`,
    // two 16-byte table registers, and 8-byte index vector `d`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Declaration of the LLVM intrinsic this shim lowers to: TBX with a
    // two-register table and a 128-bit result vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: plain value-passing intrinsic call; requires only the "neon"
    // target feature, which this function enables.
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table tuple into the raw intrinsic's arguments.
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the two-register table tuple into the raw intrinsic's arguments.
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Little-endian: reinterpret the unsigned operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: uint8x16x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // Little-endian: reinterpret the unsigned operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: uint8x16x2_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // Little-endian: reinterpret the polynomial operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: poly8x16x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // Little-endian: reinterpret the polynomial operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: poly8x16x2_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Declaration of the LLVM intrinsic this shim lowers to: TBX with a
    // three-register table and a 64-bit result vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    // SAFETY: plain value-passing intrinsic call; requires only the "neon"
    // target feature, which this function enables.
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Declaration of the LLVM intrinsic this shim lowers to: TBX with a
    // three-register table and a 128-bit result vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: plain value-passing intrinsic call; requires only the "neon"
    // target feature, which this function enables.
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the three-register table tuple into the raw intrinsic's arguments.
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the three-register table tuple into the raw intrinsic's arguments.
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // Little-endian: reinterpret the unsigned operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: uint8x16x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // Little-endian: reinterpret the unsigned operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: uint8x16x3_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // Little-endian: reinterpret the polynomial operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: poly8x16x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // Little-endian: reinterpret the polynomial operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: poly8x16x3_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    // Declaration of the LLVM intrinsic this shim lowers to: TBX with a
    // four-register table and a 64-bit result vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: plain value-passing intrinsic call; requires only the "neon"
    // target feature, which this function enables.
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    // Declaration of the LLVM intrinsic this shim lowers to: TBX with a
    // four-register table and a 128-bit result vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: plain value-passing intrinsic call; requires only the "neon"
    // target feature, which this function enables.
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the four-register table tuple into the raw intrinsic's arguments.
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the four-register table tuple into the raw intrinsic's arguments.
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // Little-endian: reinterpret the unsigned operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: uint8x16x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // Little-endian: reinterpret the unsigned operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: uint8x16x4_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // Little-endian: reinterpret the polynomial operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian shim: every operand's lanes are reversed going in, and the
    // result's lanes are reversed coming out, so the lookup itself operates
    // on a consistent (little-endian) lane numbering.
    let mut b: poly8x16x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Perform the lookup, then reverse the result back to the caller's order.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // Little-endian: reinterpret the polynomial operands as signed (same bit
    // layout), delegate to the signed primitive, and reinterpret back.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: every vector operand is lane-reversed before the
    // call and the result is reversed back, so the lookup produces the same
    // lanes as the little-endian definition of this intrinsic.
    let mut b: poly8x16x4_t = b;
    // Reverse the 16 lanes of the fallback/accumulator vector.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Reverse the 16 lanes of each of the four table registers.
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    // Reverse the 16 lanes of the index vector.
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Delegate to the shared `vqtbx4q` helper via bit-preserving transmutes.
        let ret_val: poly8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        // Undo the lane reversal on the result.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Binding to the LLVM intrinsic that lowers to the SHA3 RAX1 instruction
    // (asserted by `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure register-to-register operation; the required `sha3`
    // target feature is guaranteed by the attribute on this function.
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // Reverse the bits within each 8-bit lane via the portable SIMD
    // bit-reverse intrinsic; lowers to RBIT (asserted above).
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // Reverse the bits within each 8-bit lane via the portable SIMD
    // bit-reverse intrinsic; lowers to RBIT (asserted above).
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Unsigned wrapper over the signed implementation: the per-lane bit
    // reversal is sign-agnostic, so a bit-preserving transmute suffices.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse lane order around the call so the result
    // matches the little-endian definition.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Unsigned wrapper over the signed implementation: the per-lane bit
    // reversal is sign-agnostic, so a bit-preserving transmute suffices.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse lane order around the call so the result
    // matches the little-endian definition.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Polynomial wrapper over the signed implementation: the per-lane bit
    // reversal is element-type-agnostic, so a transmute suffices.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse lane order around the call so the result
    // matches the little-endian definition.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Polynomial wrapper over the signed implementation: the per-lane bit
    // reversal is element-type-agnostic, so a transmute suffices.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse lane order around the call so the result
    // matches the little-endian definition.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    // Binding to the LLVM intrinsic for FRECPE on a 1 x f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    // Binding to the LLVM intrinsic for FRECPE on a 2 x f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    // Scalar (double-precision) form: binds the f64 overload of FRECPE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    // Scalar (single-precision) form: binds the f32 overload of FRECPE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpeh_f16(a: f16) -> f16 {
    // Scalar half-precision form: binds the f16 overload of FRECPE.
    // Excluded on arm64ec (see the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    // SAFETY: pure register operation; `neon,fp16` enabled by the attribute above.
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Binding to the LLVM intrinsic for FRECPS on a 1 x f64 vector
    // (the Newton-Raphson step used to refine a reciprocal estimate).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Binding to the LLVM intrinsic for FRECPS on a 2 x f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    // Scalar (double-precision) form: binds the f64 overload of FRECPS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    // Scalar (single-precision) form: binds the f32 overload of FRECPS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision form: binds the f16 overload of FRECPS.
    // Excluded on arm64ec (see the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: pure register operation; `neon,fp16` enabled by the attribute above.
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    // Scalar (double-precision) form: binds the f64 overload of FRECPX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    // Scalar (single-precision) form: binds the f32 overload of FRECPX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    // SAFETY: pure register operation; `neon` is enabled by the attribute above.
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpxh_f16(a: f16) -> f16 {
    // Scalar half-precision form: binds the f16 overload of FRECPX.
    // Excluded on arm64ec (see the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    // SAFETY: pure register operation; `neon,fp16` enabled by the attribute above.
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Big-endian: reverse the 4 source lanes before the bit cast; the 1-lane
    // f64 result needs no output shuffle.
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Big-endian: reverse the 8 source lanes before the bit cast, then
    // reverse the 2 result lanes so both sides use little-endian lane order.
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Big-endian: the 1-lane f64 input needs no shuffle; only the 4-lane
    // result is reversed back into little-endian lane order.
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Big-endian: reverse the 2 source lanes before the bit cast, then
    // reverse the 8 result lanes so both sides use little-endian lane order.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Bit reinterpretation of a 128-bit polynomial scalar as 2 x f64; on
    // little-endian this is a no-op at the instruction level.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Big-endian: the scalar p128 input needs no shuffle; only the 2-lane
    // result is reversed back into little-endian lane order.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Big-endian: reverse the 2 source lanes before the bit cast; the 1-lane
    // f64 result needs no output shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Big-endian: reverse the 2 source lanes before the bit cast; the 1-lane
    // p64 result needs no output shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Big-endian: reverse the 4 source lanes before the bit cast, then
    // reverse the 2 result lanes so both sides use little-endian lane order.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Big-endian: reverse the 4 source lanes before the bit cast, then
    // reverse the 2 result lanes so both sides use little-endian lane order.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Big-endian: the 1-lane input needs no shuffle; only the 2-lane result
    // is reversed back into little-endian lane order.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Big-endian: the 1-lane input needs no shuffle; only the 8-lane result
    // is reversed back into little-endian lane order.
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Big-endian: the 1-lane input needs no shuffle; only the 4-lane result
    // is reversed back into little-endian lane order.
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Same-size bit reinterpretation; on little-endian no lane fix-up is
    // needed, so this is a no-op at the instruction level (asserted as nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Big-endian: the 1-lane input needs no shuffle; only the 2-lane result
    // is reversed back into little-endian lane order.
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
21796#[doc = "Vector reinterpret cast operation"]
21797#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
21798#[inline]
21799#[target_feature(enable = "neon")]
21800#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21801#[cfg_attr(test, assert_instr(nop))]
21802pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
21803    unsafe { transmute(a) }
21804}
// Reinterpret a 64-bit float vector (float64x1_t) as unsigned-integer vectors.
// Bit-pattern-preserving casts; big-endian variants lane-reverse multi-lane
// results to keep NEON lane numbering consistent.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe {
        // Big-endian: bit-cast, then reverse the eight 8-bit lanes.
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe {
        // Big-endian: bit-cast, then reverse the four 16-bit lanes.
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe {
        // Big-endian: bit-cast, then reverse the two 32-bit lanes.
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Single 64-bit lane on both sides: no lane reordering needed on any endianness.
    unsafe { transmute(a) }
}
// Reinterpret a 64-bit float vector (float64x1_t) as polynomial vectors.
// Bit-pattern-preserving casts; big-endian variants lane-reverse multi-lane
// results to keep NEON lane numbering consistent.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe {
        // Big-endian: bit-cast, then reverse the eight 8-bit lanes.
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe {
        // Big-endian: bit-cast, then reverse the four 16-bit lanes.
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    // Single 64-bit lane on both sides: no lane reordering needed on any endianness.
    unsafe { transmute(a) }
}
// Reinterpret a 128-bit float vector as a single 128-bit polynomial scalar.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Big-endian: reverse the two f64 input lanes before the bit-cast; the
    // scalar p128 result itself needs no lane shuffle.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
// Reinterpret a 128-bit float64x2_t as other 128-bit float/signed vectors.
// Big-endian variants reverse the two f64 input lanes first, bit-cast, then
// reverse the result's lanes so NEON lane numbering is preserved.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the four f32 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the sixteen i8 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the eight i16 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the four i32 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the two i64 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// Reinterpret a 128-bit float64x2_t as unsigned-integer vectors. Big-endian
// variants reverse the two f64 input lanes first, bit-cast, then reverse the
// result's lanes so NEON lane numbering is preserved.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the sixteen u8 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the eight u16 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the four u32 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the two u64 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// Reinterpret a 128-bit float64x2_t as polynomial vectors. Big-endian
// variants reverse the two f64 input lanes first, bit-cast, then reverse the
// result's lanes so NEON lane numbering is preserved.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the sixteen p8 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the eight p16 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Big-endian: reverse input f64 lanes, bit-cast, reverse the two p64 lanes.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// Reinterpret signed 8-bit vectors as 64-bit float vectors. On big-endian
// targets the multi-lane input is lane-reversed before the bit-cast (and the
// q-variant's two-lane f64 result is reversed afterwards).
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Big-endian: reverse the eight i8 input lanes; the single-lane f64 result
    // needs no shuffle.
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Big-endian: reverse the sixteen i8 input lanes, bit-cast, then reverse
    // the two f64 result lanes.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// Reinterpret signed 16-bit vectors as 64-bit float vectors; big-endian
// variants lane-reverse the input (and the q-variant's f64 result).
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Big-endian: reverse the four i16 input lanes; single-lane result, no shuffle.
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // Big-endian: reverse the eight i16 input lanes, bit-cast, then reverse
    // the two f64 result lanes.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// Reinterpret signed 32-bit vectors as 64-bit float vectors; big-endian
// variants lane-reverse the input (and the q-variant's f64 result).
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // Big-endian: reverse the two i32 input lanes; single-lane result, no shuffle.
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // Big-endian: reverse the four i32 input lanes, bit-cast, then reverse
    // the two f64 result lanes.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// Reinterpret a single-lane i64 vector as f64 / p64 single-lane vectors.
// One 64-bit lane on both sides, so no endianness-specific variants exist.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    // Pure bit-cast: single 64-bit lane, identical on any endianness.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    // Pure bit-cast: single 64-bit lane, identical on any endianness.
    unsafe { transmute(a) }
}
// Reinterpret a two-lane i64 vector as f64 / p64 two-lane vectors. Lane width
// is unchanged (64 bits), so the big-endian variants only compensate for the
// stored lane ordering by reversing input and result lanes.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Big-endian: reverse the two input lanes, bit-cast, reverse the two result lanes.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // Big-endian: reverse the two input lanes, bit-cast, reverse the two result lanes.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// Reinterpret unsigned 8-bit vectors as 64-bit float vectors; big-endian
// variants lane-reverse the input (and the q-variant's f64 result).
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Big-endian: reverse the eight u8 input lanes; single-lane result, no shuffle.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Little-endian: lane order already matches; plain bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Big-endian: reverse the sixteen u8 input lanes, bit-cast, then reverse
    // the two f64 result lanes.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// u16 -> f64 reinterpret casts (64-bit and 128-bit vectors); big-endian
// variants reverse the lane order around the bit-level transmute.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Reverse the four input lanes; the single-lane result needs no shuffle.
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Reverse the eight input lanes, transmute, then reverse the two f64 lanes.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// u32 -> f64 reinterpret casts (64-bit and 128-bit vectors); big-endian
// variants reverse the lane order around the bit-level transmute.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Reverse the two input lanes; the single-lane result needs no shuffle.
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Reverse the four input lanes, transmute, then reverse the two f64 lanes.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// u64 -> f64/p64 reinterpret casts. The 1-lane (64-bit vector) forms have no
// lane order to fix up, so they need no endian cfg split; the 2-lane (128-bit)
// forms reverse lanes around the transmute on big-endian targets.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Reverse the two input lanes, transmute, then reverse the two result lanes.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Reverse the two input lanes, transmute, then reverse the two result lanes.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// p8 -> f64 reinterpret casts (64-bit and 128-bit vectors); big-endian
// variants reverse the lane order around the bit-level transmute.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Reverse the eight byte lanes; the single-lane result is used as-is.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Reverse the sixteen byte lanes, transmute, then reverse the two f64 lanes.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// p16 -> f64 reinterpret casts (64-bit and 128-bit vectors); big-endian
// variants reverse the lane order around the bit-level transmute.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Reverse the four input lanes; the single-lane result needs no shuffle.
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Reverse the eight input lanes, transmute, then reverse the two f64 lanes.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// p64 (1 lane) -> 64-bit-vector reinterpret casts. Only f32_p64 needs an
// endian split: its 2-lane result is reversed on big-endian. The 64-bit
// 1-lane targets (f64/s64/u64) are pure transmutes on either endianness.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Single-lane input needs no shuffle; only the 2-lane result is reversed.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
// p64x2 -> 128-bit-vector reinterpret casts. Little-endian forms are pure
// transmutes; big-endian forms reverse the input lanes and the result lanes
// (note f32_p64 reverses four result lanes, the 64-bit targets two).
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Reverse the two input lanes, transmute, then reverse the four f32 lanes.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Reverse the two input lanes, transmute, then reverse the two result lanes.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Reverse the two input lanes, transmute, then reverse the two result lanes.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Reverse the two input lanes, transmute, then reverse the two result lanes.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// FRINT32X family: each wrapper forwards directly to the matching LLVM
// intrinsic declared in a local `extern "unadjusted"` block. The vector forms
// bind llvm.aarch64.neon.frint32x.<vector-type>; the 1-lane f64 form binds the
// scalar llvm.aarch64.frint32x.f64, extracting lane 0 and transmuting the
// scalar result back into float64x1_t.
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    // Scalar intrinsic: round lane 0 and rebuild the 1-lane vector.
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
// FRINT32Z family (round to 32-bit integer toward zero): same shape as the
// FRINT32X wrappers above — vector forms bind llvm.aarch64.neon.frint32z.*,
// the 1-lane f64 form binds the scalar llvm.aarch64.frint32z.f64.
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    // Scalar intrinsic: round lane 0 and rebuild the 1-lane vector.
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
// FRINT64X family (round to 64-bit integer, current rounding mode): vector
// forms bind llvm.aarch64.neon.frint64x.*; the 1-lane f64 form binds the
// scalar llvm.aarch64.frint64x.f64.
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    // Scalar intrinsic: round lane 0 and rebuild the 1-lane vector.
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
23092#[doc = "Floating-point round to 64-bit integer toward zero"]
23093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
23094#[inline]
23095#[target_feature(enable = "neon,frintts")]
23096#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23097#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23098pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
23099    unsafe extern "unadjusted" {
23100        #[cfg_attr(
23101            any(target_arch = "aarch64", target_arch = "arm64ec"),
23102            link_name = "llvm.aarch64.neon.frint64z.v2f32"
23103        )]
23104        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
23105    }
23106    unsafe { _vrnd64z_f32(a) }
23107}
23108#[doc = "Floating-point round to 64-bit integer toward zero"]
23109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
23110#[inline]
23111#[target_feature(enable = "neon,frintts")]
23112#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23113#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23114pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
23115    unsafe extern "unadjusted" {
23116        #[cfg_attr(
23117            any(target_arch = "aarch64", target_arch = "arm64ec"),
23118            link_name = "llvm.aarch64.neon.frint64z.v4f32"
23119        )]
23120        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
23121    }
23122    unsafe { _vrnd64zq_f32(a) }
23123}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    // Binding for the LLVM FRINT64Z intrinsic (128-bit, 2 x f64 variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    // The LLVM intrinsic here is scalar (f64), so the single lane is
    // extracted, rounded, and the result transmuted back into a 1-lane vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    // SAFETY: float64x1_t and f64 have the same size, so the transmute of the
    // result is valid; target features are enabled via #[target_feature].
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise truncation via the `simd_trunc` platform intrinsic.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise truncation via the `simd_trunc` platform intrinsic.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise truncation via the `simd_trunc` platform intrinsic.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise truncation via the `simd_trunc` platform intrinsic.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise truncation via the `simd_trunc` platform intrinsic.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise truncation via the `simd_trunc` platform intrinsic.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise ties-away rounding via the `simd_round` intrinsic.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise ties-away rounding via the `simd_round` intrinsic.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise ties-away rounding via the `simd_round` intrinsic.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise ties-away rounding via the `simd_round` intrinsic.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise ties-away rounding via the `simd_round` intrinsic.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise ties-away rounding via the `simd_round` intrinsic.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    // Scalar f16 rounding helper; asserted to lower to a single FRINTA.
    roundf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    // Scalar f16 truncation helper; asserted to lower to a single FRINTZ.
    truncf16(a)
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    // Binding for the generic LLVM nearbyint intrinsic; asserted to lower to
    // FRINTI on this target.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    // Binding for the generic LLVM nearbyint intrinsic (8 x f16 variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    // Binding for the generic LLVM nearbyint intrinsic (2 x f32 variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    // Binding for the generic LLVM nearbyint intrinsic (4 x f32 variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    // Binding for the generic LLVM nearbyint intrinsic (1 x f64 variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    // Binding for the generic LLVM nearbyint intrinsic (2 x f64 variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    // Binding for the scalar LLVM nearbyint intrinsic on f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise floor via the `simd_floor` platform intrinsic.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise floor via the `simd_floor` platform intrinsic.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise floor via the `simd_floor` platform intrinsic.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise floor via the `simd_floor` platform intrinsic.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise floor via the `simd_floor` platform intrinsic.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise floor via the `simd_floor` platform intrinsic.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    // Scalar f16 floor helper; asserted to lower to a single FRINTM.
    floorf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    // Binding for the generic LLVM roundeven intrinsic; asserted to lower to
    // FRINTN on this target.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    // Binding for the generic LLVM roundeven intrinsic (2 x f64 variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndnq_f64(a) }
}
23501#[doc = "Floating-point round to integral, toward minus infinity"]
23502#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
23503#[inline]
23504#[target_feature(enable = "neon,fp16")]
23505#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
23506#[cfg(not(target_arch = "arm64ec"))]
23507#[cfg_attr(test, assert_instr(frintn))]
23508pub fn vrndnh_f16(a: f16) -> f16 {
23509    unsafe extern "unadjusted" {
23510        #[cfg_attr(
23511            any(target_arch = "aarch64", target_arch = "arm64ec"),
23512            link_name = "llvm.roundeven.f16"
23513        )]
23514        fn _vrndnh_f16(a: f16) -> f16;
23515    }
23516    unsafe { _vrndnh_f16(a) }
23517}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    // Binding for the scalar LLVM roundeven intrinsic on f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise ceiling via the `simd_ceil` platform intrinsic.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise ceiling via the `simd_ceil` platform intrinsic.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise ceiling via the `simd_ceil` platform intrinsic.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise ceiling via the `simd_ceil` platform intrinsic.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise ceiling via the `simd_ceil` platform intrinsic.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise ceiling via the `simd_ceil` platform intrinsic.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar f16 ceiling helper; asserted to lower to a single FRINTP.
    ceilf16(a)
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise rounding via the `simd_round_ties_even` intrinsic;
    // asserted to lower to FRINTX on this target.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise rounding via the `simd_round_ties_even` intrinsic;
    // asserted to lower to FRINTX on this target.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise rounding via the `simd_round_ties_even` intrinsic;
    // asserted to lower to FRINTX on this target.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise rounding via the `simd_round_ties_even` intrinsic;
    // asserted to lower to FRINTX on this target.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise rounding via the `simd_round_ties_even` intrinsic;
    // asserted to lower to FRINTX on this target.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise rounding via the `simd_round_ties_even` intrinsic;
    // asserted to lower to FRINTX on this target.
    unsafe { simd_round_ties_even(a) }
}
23656#[doc = "Floating-point round to integral, using current rounding mode"]
23657#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
23658#[inline]
23659#[target_feature(enable = "neon,fp16")]
23660#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
23661#[cfg(not(target_arch = "arm64ec"))]
23662#[cfg_attr(test, assert_instr(frintx))]
23663pub fn vrndxh_f16(a: f16) -> f16 {
23664    round_ties_even_f16(a)
23665}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    // Binding for the scalar SRSHL intrinsic (shift amount in `b`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    // Binding for the scalar URSHL intrinsic (signed shift amount in `b`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: the required target features are enabled via #[target_feature].
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // N is validated at compile time, then the right shift is expressed as a
    // rounding left shift by -N (SRSHL treats a negative amount as a right
    // shift); the compiler folds this into SRSHR, per assert_instr above.
    static_assert!(N >= 1 && N <= 64);
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    // N is validated at compile time, then the right shift is expressed as a
    // rounding left shift by -N (URSHL treats a negative amount as a right
    // shift); the compiler folds this into URSHR, per assert_instr above.
    static_assert!(N >= 1 && N <= 64);
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b` with the rounding shift, then concatenate: `a` becomes the
    // low 8 lanes, the narrowed result the high 8 lanes.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b`, then concatenate `a` (low lanes) with the result (high).
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b`, then concatenate `a` (low lanes) with the result (high).
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b` with the rounding shift, then concatenate: `a` becomes the
    // low 8 lanes, the narrowed result the high 8 lanes.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b`, then concatenate `a` (low lanes) with the result (high).
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b`, then concatenate `a` (low lanes) with the result (high).
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    // Thin wrapper over the LLVM intrinsic that lowers to FRSQRTE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic that lowers to FRSQRTE (2-lane f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    // Scalar (double-precision) form; lowers to FRSQRTE via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    // Scalar (single-precision) form; lowers to FRSQRTE via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    // Scalar half-precision form; requires fp16 and is excluded on arm64ec.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Thin wrapper over the LLVM intrinsic that lowers to FRSQRTS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic that lowers to FRSQRTS (2-lane f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    // Scalar (double-precision) form; lowers to FRSQRTS via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    // Scalar (single-precision) form; lowers to FRSQRTS via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision form; requires fp16 and is excluded on arm64ec.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Shift amount must fit the 64-bit element width.
    static_assert!(N >= 1 && N <= 64);
    // Rounding shift `b` right by N, then accumulate into `a` (wrapping add).
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Shift amount must fit the 64-bit element width.
    static_assert!(N >= 1 && N <= 64);
    // Rounding shift `b` right by N, then accumulate into `a` (wrapping add).
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Rounded, narrowed `b - c` becomes the high half; `a` stays as the low half.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Rounded, narrowed `b - c` becomes the high half; `a` stays as the low half.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Rounded, narrowed `b - c` becomes the high half; `a` stays as the low half.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Rounded, narrowed `b - c` becomes the high half; `a` stays as the low half.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Rounded, narrowed `b - c` becomes the high half; `a` stays as the low half.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Rounded, narrowed `b - c` becomes the high half; `a` stays as the low half.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Big-endian build: same result as the little-endian variant; only the
    // expected instruction in the test assertion differs.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Big-endian build: rounded, narrowed `b - c` forms the high half.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Big-endian build: rounded, narrowed `b - c` forms the high half.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Big-endian build: rounded, narrowed `b - c` forms the high half.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Big-endian build: rounded, narrowed `b - c` forms the high half.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Big-endian build: rounded, narrowed `b - c` forms the high half.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // LANE must be 0 or 1 for a 2-element vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the LLVM crypto intrinsic that lowers to SHA512H2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the LLVM crypto intrinsic that lowers to SHA512H.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the LLVM crypto intrinsic that lowers to SHA512SU0.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the LLVM crypto intrinsic that lowers to SHA512SU1.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    // Reuse the 1-lane vector shift; transmutes round-trip between i64 and int64x1_t.
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    // Reuse the 1-lane vector shift; the shift amount `b` is signed per the ACLE signature.
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    // Shift of 0..=8 bits is allowed for 8-bit source elements.
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half of `a`, then widen and shift it left by N.
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    // Shift of 0..=16 bits is allowed for 16-bit source elements.
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Extract the high half of `a`, then widen and shift it left by N.
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    // Shift of 0..=32 bits is allowed for 32-bit source elements.
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Extract the high half of `a`, then widen and shift it left by N.
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    // Shift of 0..=8 bits is allowed for 8-bit source elements.
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half of `a`, then widen and shift it left by N.
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    // Shift of 0..=16 bits is allowed for 16-bit source elements.
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Extract the high half of `a`, then widen and shift it left by N.
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    // Shift of 0..=32 bits is allowed for 32-bit source elements.
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Extract the high half of `a`, then widen and shift it left by N.
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount must fit the narrowed 8-bit element width.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Keep `a` as the low half and append the narrowed `b >> N` as the high half.
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount must fit the narrowed 16-bit element width.
    static_assert!(N >= 1 && N <= 16);
    // Keep `a` as the low half and append the narrowed `b >> N` as the high half.
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount must fit the narrowed 32-bit element width.
    static_assert!(N >= 1 && N <= 32);
    // Keep `a` as the low half and append the narrowed `b >> N` as the high half.
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount must fit the narrowed 8-bit element width.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Keep `a` as the low half and append the narrowed `b >> N` as the high half.
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount must fit the narrowed 16-bit element width.
    static_assert!(N >= 1 && N <= 16);
    // Keep `a` as the low half and append the narrowed `b >> N` as the high half.
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount must fit the narrowed 32-bit element width.
    static_assert!(N >= 1 && N <= 32);
    // Keep `a` as the low half and append the narrowed `b >> N` as the high half.
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Immediate must fit in 3 bits (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // Forward the const immediate N as a runtime argument to the LLVM intrinsic.
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Immediate must fit in 3 bits (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // Forward the const immediate N as a runtime argument to the LLVM intrinsic.
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Immediate must fit in 4 bits (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // Forward the const immediate N as a runtime argument to the LLVM intrinsic.
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Immediate must fit in 4 bits (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // Forward the const immediate N as a runtime argument to the LLVM intrinsic.
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Immediate must be 0..=31 for 32-bit elements.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // Forward the const immediate N as a runtime argument to the LLVM intrinsic.
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Shift amount must be 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature]; N was validated above.
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature]; N was validated above.
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature]; N was validated above.
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Shift amount must be 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Shift amount must be 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: signed/unsigned vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: polynomial/signed vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: polynomial/signed vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: polynomial/signed vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: polynomial/signed vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: polynomial/signed vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: polynomial/signed vectors of the same width are layout-compatible;
    // SLI operates on raw bit patterns, so delegating to the signed form is sound.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Shift amount must be 0..=63 for a 64-bit scalar.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: i64 and the single-lane int64x1_t vector have identical size, so
    // the scalar form can be implemented via the one-lane vector intrinsic.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Shift amount must be 0..=63 for a 64-bit scalar.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: u64 and the single-lane uint64x1_t vector have identical size, so
    // the scalar form can be implemented via the one-lane vector intrinsic.
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the required `sm4` feature is enabled via #[target_feature].
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the required `sm4` feature is enabled via #[target_feature].
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the required `sm4` feature is enabled via #[target_feature].
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane selector must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `sm4` is enabled via #[target_feature]; IMM2 was validated above.
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane selector must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `sm4` is enabled via #[target_feature]; IMM2 was validated above.
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane selector must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `sm4` is enabled via #[target_feature]; IMM2 was validated above.
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane selector must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `sm4` is enabled via #[target_feature]; IMM2 was validated above.
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the required `sm4` feature is enabled via #[target_feature].
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the required `sm4` feature is enabled via #[target_feature].
    unsafe { _vsm4eq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqaddq_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i32"
        )]
        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqadd_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i32"
        )]
        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqaddq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v1i64"
        )]
        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqadd_u64(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i64"
        )]
        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqaddq_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    // Scalar form: splat both scalars into 8-lane vectors, run the vector
    // USQADD, and extract lane 0 as the result.
    // SAFETY: lane index 0 is in bounds for an 8-lane vector.
    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    // Scalar form: splat both scalars into 4-lane vectors, run the vector
    // USQADD, and extract lane 0 as the result.
    // SAFETY: lane index 0 is in bounds for a 4-lane vector.
    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    // Scalar (i64) variant of the LLVM intrinsic exists, so no vector round-trip
    // is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    // Scalar (i32) variant of the LLVM intrinsic exists, so no vector round-trip
    // is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vsqadds_u32(a, b) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: `fp16` is enabled via #[target_feature]; lane-wise sqrt via the
    // portable SIMD intrinsic, which lowers to FSQRT (see assert_instr).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: `fp16` is enabled via #[target_feature]; lane-wise sqrt via the
    // portable SIMD intrinsic, which lowers to FSQRT (see assert_instr).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: `neon` is enabled via #[target_feature]; lane-wise sqrt via the
    // portable SIMD intrinsic, which lowers to FSQRT (see assert_instr).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: `neon` is enabled via #[target_feature]; lane-wise sqrt via the
    // portable SIMD intrinsic, which lowers to FSQRT (see assert_instr).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: `neon` is enabled via #[target_feature]; lane-wise sqrt via the
    // portable SIMD intrinsic, which lowers to FSQRT (see assert_instr).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: `neon` is enabled via #[target_feature]; lane-wise sqrt via the
    // portable SIMD intrinsic, which lowers to FSQRT (see assert_instr).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Floating-point square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn vsqrth_f16(a: f16) -> f16 {
    // NOTE(review): the generated doc summary previously read "Floating-point
    // round to integral, using current rounding mode", which contradicts the
    // body (sqrtf16) and the asserted FSQRT instruction; corrected above.
    // The fix should also be applied in crates/stdarch-gen-arm/spec/,
    // since this file is regenerated from it.
    sqrtf16(a)
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SRI right-shift amount is 1..=lane-width, i.e. 1..=8 for 8-bit lanes
    // (unlike SLI, whose range starts at 0).
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i8"
        )]
        fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature]; N was validated above.
    unsafe { _vsri_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // SRI's immediate must be in 1..=8 for 8-bit lanes; checked at compile time.
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v16i8"
        )]
        fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and N
    // is range-validated by the static_assert above.
    unsafe { _vsriq_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // SRI's immediate must be in 1..=16 for 16-bit lanes; checked at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i16"
        )]
        fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and N
    // is range-validated by the static_assert above.
    unsafe { _vsri_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // SRI's immediate must be in 1..=16 for 16-bit lanes; checked at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i16"
        )]
        fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and N
    // is range-validated by the static_assert above.
    unsafe { _vsriq_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // SRI's immediate must be in 1..=32 for 32-bit lanes; checked at compile time.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i32"
        )]
        fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and N
    // is range-validated by the static_assert above.
    unsafe { _vsri_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SRI's immediate must be in 1..=32 for 32-bit lanes; checked at compile time.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i32"
        )]
        fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and N
    // is range-validated by the static_assert above.
    unsafe { _vsriq_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // SRI's immediate must be in 1..=64 for 64-bit lanes; checked at compile time.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v1i64"
        )]
        fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and N
    // is range-validated by the static_assert above.
    unsafe { _vsri_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // SRI's immediate must be in 1..=64 for 64-bit lanes; checked at compile time.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i64"
        )]
        fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and N
    // is range-validated by the static_assert above.
    unsafe { _vsriq_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the unsigned element type.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the polynomial element type.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the polynomial element type.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the polynomial element type.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the polynomial element type.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the polynomial element type.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation so the signed
    // implementation can be reused for the polynomial element type.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: scalar form — the i64 operands are reinterpreted as one-lane
    // vectors so the vector implementation can be reused.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: scalar form — the u64 operands are reinterpreted as one-lane
    // vectors so the vector implementation can be reused.
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    // Whole-vector store with no alignment requirement beyond validity of
    // `ptr` for a write of the full vector; `cast()` retypes the pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    // Declaration of the LLVM `st1x2` intrinsic for two float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    // Unpack the pair and hand both vectors plus the destination pointer to LLVM.
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    // Declaration of the LLVM `st1x2` intrinsic for two float64x2 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    // Unpack the pair and hand both vectors plus the destination pointer to LLVM.
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    // Declaration of the LLVM `st1x3` intrinsic for three float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    // Unpack the triple and hand all three vectors plus the destination pointer to LLVM.
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    // Declaration of the LLVM `st1x3` intrinsic for three float64x2 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    // Unpack the triple and hand all three vectors plus the destination pointer to LLVM.
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    // Declaration of the LLVM `st1x4` intrinsic for four float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    // Unpack the quadruple and hand all four vectors plus the destination pointer to LLVM.
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    // Declaration of the LLVM `st1x4` intrinsic for four float64x2 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    // Unpack the quadruple and hand all four vectors plus the destination pointer to LLVM.
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // A 1-lane vector only has lane 0, so the const generic is pinned at compile time.
    static_assert!(LANE == 0);
    // Extract the selected lane and write the scalar through `a`.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // LANE must fit in 1 bit (0 or 1) for a 2-lane vector; checked at compile time.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and write the scalar through `a`.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    // Declaration of the LLVM `st2` intrinsic for two float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    // The LLVM signature takes an untyped byte pointer, hence `a as _`.
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // A 1-lane vector only has lane 0, so the const generic is pinned at compile time.
    static_assert!(LANE == 0);
    // Declaration of the LLVM `st2lane` intrinsic for two float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // A 1-lane vector only has lane 0, so the const generic is pinned at compile time.
    static_assert!(LANE == 0);
    // Declaration of the LLVM `st2lane` intrinsic for two int64x1 vectors.
    // The u64/p64 lane variants below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    static_assert!(LANE == 0);
    // poly64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    static_assert!(LANE == 0);
    // uint64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    // Declaration of the LLVM `st2` intrinsic for two float64x2 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    // The LLVM signature takes an untyped byte pointer, hence `a as _`.
    _vst2q_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    // Declaration of the LLVM `st2` intrinsic for two int64x2 vectors.
    // vst2q_u64/vst2q_p64 below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    // The LLVM signature takes an untyped byte pointer, hence `a as _`.
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // LANE must fit in 1 bit (0 or 1) for a 2-lane vector; checked at compile time.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the LLVM `st2lane` intrinsic for two float64x2 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // LANE must fit in 4 bits (0..=15) for a 16-lane vector; checked at compile time.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the LLVM `st2lane` intrinsic for two int8x16 vectors.
    // The u8/p8 lane variants below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 for the LLVM call.
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // LANE must fit in 1 bit (0 or 1) for a 2-lane vector; checked at compile time.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the LLVM `st2lane` intrinsic for two int64x2 vectors.
    // The u64/p64 lane variants below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    // uint8 vectors are bit-identical to int8 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    // uint64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    // poly8 vectors are bit-identical to int8 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // poly64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // uint64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    // Declaration of the LLVM `st3` intrinsic for three float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v1f64.p0"
        )]
        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
    }
    // The LLVM signature takes an untyped byte pointer, hence `a as _`.
    _vst3_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    // A 1-lane vector only has lane 0, so the const generic is pinned at compile time.
    static_assert!(LANE == 0);
    // Declaration of the LLVM `st3lane` intrinsic for three float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // A 1-lane vector only has lane 0, so the const generic is pinned at compile time.
    static_assert!(LANE == 0);
    // Declaration of the LLVM `st3lane` intrinsic for three int64x1 vectors.
    // The u64/p64 lane variants below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    static_assert!(LANE == 0);
    // poly64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    static_assert!(LANE == 0);
    // uint64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    // Declaration of the LLVM `st3` intrinsic for three float64x2 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2f64.p0"
        )]
        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
    }
    // The LLVM signature takes an untyped byte pointer, hence `a as _`.
    _vst3q_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    // Declaration of the LLVM `st3` intrinsic for three int64x2 vectors.
    // vst3q_u64/vst3q_p64 below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2i64.p0"
        )]
        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
    }
    // The LLVM signature takes an untyped byte pointer, hence `a as _`.
    _vst3q_s64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    // LANE must fit in 1 bit (0 or 1) for a 2-lane vector; checked at compile time.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the LLVM `st3lane` intrinsic for three float64x2 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // LANE must fit in 4 bits (0..=15) for a 16-lane vector; checked at compile time.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the LLVM `st3lane` intrinsic for three int8x16 vectors.
    // The u8/p8 lane variants below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 for the LLVM call.
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // LANE must fit in 1 bit (0 or 1) for a 2-lane vector; checked at compile time.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the LLVM `st3lane` intrinsic for three int64x2 vectors.
    // The u64/p64 lane variants below reuse this implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64 and the pointer erased to bytes for the LLVM call.
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    // uint8 vectors are bit-identical to int8 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    // uint64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    // poly8 vectors are bit-identical to int8 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // poly64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // uint64 vectors are bit-identical to int64 vectors, so delegate to the
    // signed implementation via transmute of both pointer and data.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    // Declaration of the LLVM `st4` intrinsic for four float64x1 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    // The LLVM signature takes an untyped byte pointer, hence `a as _`.
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        // Direct binding to LLVM's st4lane intrinsic for single-lane f64 vectors.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Store lane LANE from each of the four registers at `a`.
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        // Direct binding to LLVM's st4lane intrinsic for single-lane i64 vectors.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Store lane LANE from each of the four registers at `a`.
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // p64 and i64 vectors share a bit pattern; delegate to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // u64 and i64 vectors share a bit pattern; delegate to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        // Direct binding to LLVM's st4 intrinsic for 2-lane f64 vectors.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
        )]
        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    // Store the four registers interleaved at `a`.
    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    unsafe extern "unadjusted" {
        // Direct binding to LLVM's st4 intrinsic for 2-lane i64 vectors.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
        )]
        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    // Store the four registers interleaved at `a`.
    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // LANE indexes one of the 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        // Direct binding to LLVM's st4lane intrinsic for 2-lane f64 vectors.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Store lane LANE from each of the four registers at `a`.
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    // LANE indexes one of the 16 byte lanes, so it must fit in 4 bits.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        // Direct binding to LLVM's st4lane intrinsic for 16-lane i8 vectors.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Store lane LANE from each of the four registers at `a`.
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    // LANE indexes one of the 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        // Direct binding to LLVM's st4lane intrinsic for 2-lane i64 vectors.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Store lane LANE from each of the four registers at `a`.
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    // LANE indexes one of the 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 vectors share a bit pattern; delegate to the signed variant.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    // LANE indexes one of the 16 byte lanes, so it must fit in 4 bits.
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 vectors share a bit pattern; delegate to the signed variant.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    // LANE indexes one of the 2 lanes, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 vectors share a bit pattern; delegate to the signed variant.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    // LANE indexes one of the 16 byte lanes, so it must fit in 4 bits.
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 vectors share a bit pattern; delegate to the signed variant.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // p64 and i64 vectors share a bit pattern; delegate to the signed 64-bit store.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // u64 and i64 vectors share a bit pattern; delegate to the signed 64-bit store.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Element-wise floating-point subtraction; lowers to FSUB.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Element-wise floating-point subtraction; lowers to FSUB.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    // Wrapping (modular) subtraction, matching the SUB instruction's behavior.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Wrapping (modular) subtraction, matching the SUB instruction's behavior.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision subtraction; lowers to FSUB.
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..16), widen each to 16 bits, then subtract.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..8), widen each to 32 bits, then subtract.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..4), widen each to 64 bits, then subtract.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..16), zero-extend to 16 bits, then subtract.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..8), zero-extend to 32 bits, then subtract.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..4), zero-extend to 64 bits, then subtract.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Take the high half of `b` (lanes 8..16), widen it, and subtract from `a`.
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Take the high half of `b` (lanes 4..8), widen it, and subtract from `a`.
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Take the high half of `b` (lanes 2..4), widen it, and subtract from `a`.
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Take the high half of `b` (lanes 8..16), zero-extend it, and subtract from `a`.
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Take the high half of `b` (lanes 4..8), zero-extend it, and subtract from `a`.
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Take the high half of `b` (lanes 2..4), zero-extend it, and subtract from `a`.
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
    // LANE selects one of the four 32-bit groups of `c` (2 bits).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // View `c` as four 32-bit groups, broadcast group LANE to both result lanes,
        // then reuse the USDOT intrinsic with the signed/unsigned operands swapped.
        let c: uint32x4_t = transmute(c);
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, transmute(c), b)
    }
}
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
    // LANE selects one of the four 32-bit groups of `c` (2 bits).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // View `c` as four 32-bit groups, broadcast group LANE to all result lanes,
        // then reuse the USDOT intrinsic with the signed/unsigned operands swapped.
        let c: uint32x4_t = transmute(c);
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, transmute(c), b)
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Pad the 64-bit table with zeros to form the 128-bit table TBL expects;
    // the signed indices are reinterpreted as unsigned for vqtbl1_s8.
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        {
            transmute(b)
        }
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Pad the 64-bit table with zeros to form the 128-bit table TBL expects.
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    // Pad the 64-bit table with zeros to form the 128-bit table TBL expects.
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // Concatenate the two 64-bit tables into one 128-bit table and look up via TBL.
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Concatenate the two 64-bit tables into one 128-bit table and look up via TBL.
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse the lane order of every input so the transmute-based
    // combine sees little-endian layout, then reverse the result back.
    let mut a: uint8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Concatenate the two 64-bit tables into one 128-bit table and look up via TBL.
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse the lane order of every input so the transmute-based
    // combine sees little-endian layout, then reverse the result back.
    let mut a: poly8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // Pack the three 64-bit tables into two 128-bit tables, zero-padding the
    // second one, then use the two-register TBL form.
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the three 64-bit tables into two 128-bit tables, zero-padding the
    // second one, then use the two-register TBL form.
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse the lane order of every input so the transmute-based
    // packing sees little-endian layout, then reverse the result back.
    let mut a: uint8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Pack into two 128-bit tables, zero-padding the second one.
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Pack the three 64-bit tables into two 128-bit tables, zero-padding the
    // second one, then use the two-register TBL form.
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse the lane order of every input so the transmute-based
    // packing sees little-endian layout, then reverse the result back.
    let mut a: poly8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Pack into two 128-bit tables, zero-padding the second one.
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // Four 64-bit table registers pack exactly into two 128-bit registers,
    // so no zero padding is needed before the two-register TBL.
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Four 64-bit tables pack exactly into two 128-bit registers; the
    // lookup is a single two-register TBL, no padding required.
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut a: uint8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Four 64-bit tables fit exactly into two 128-bit registers.
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Four 64-bit tables pack exactly into two 128-bit registers; a
    // single two-register TBL services the lookup.
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut a: poly8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Four 64-bit tables fit exactly into two 128-bit registers.
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    // vtbx1 must keep the corresponding lane of `a` for any index outside
    // the 8-byte table. The widened 16-byte quad table (upper half zeroed)
    // would instead produce 0 for indices 8..16, so an explicit lane
    // select restores `a` wherever the index is out of range (c >= 8).
    unsafe {
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    // The 8-byte table is widened to 16 bytes (upper half zeroed) for the
    // quad TBX; indices 8..16 would then read the zero padding instead of
    // preserving `a`, so a select keeps `a` for every lane with c >= 8.
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    // Same scheme as vtbx1_u8: widen the 8-byte table with zeros for the
    // quad TBX, then select back `a` for out-of-range indices (c >= 8),
    // which the zero padding would otherwise map to 0.
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // Two 64-bit tables pack exactly into one 128-bit register, and TBX
    // already preserves `a` for out-of-range indices, so unlike vtbx1 no
    // extra select is needed.
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Both 64-bit tables fit exactly in one 128-bit register; TBX's native
    // keep-`a`-on-out-of-range behavior matches vtbx2, so no select needed.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut b: uint8x8x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Both 64-bit tables fit exactly in one 128-bit register; TBX's native
    // keep-`a`-on-out-of-range behavior matches vtbx2, so no select needed.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut b: poly8x8x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // The 3-register (24-byte) table is padded to 32 bytes with zeros so a
    // two-register TBX can be used.
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    // Indices 24..32 would read the zero padding instead of preserving
    // `a`, so an explicit select keeps `a` for every lane with c >= 24.
    unsafe {
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Pad the 24-byte table to 32 bytes with zeros for a two-register TBX.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    // Indices 24..32 would read the zero padding instead of preserving
    // `a`, so a select keeps `a` for every lane with c >= 24.
    unsafe {
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut b: uint8x8x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Pad the 24-byte table to 32 bytes with zeros for a two-register TBX.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // Select keeps `a` for c >= 24, where the zero padding would
        // otherwise yield 0 rather than preserving `a`.
        let ret_val: uint8x8_t = transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Pad the 24-byte table to 32 bytes with zeros for a two-register TBX.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    // Select keeps `a` for c >= 24, which would otherwise read the zero
    // padding. NOTE(review): simd_lt is instantiated with poly8x8_t here
    // while siblings use uint8x8_t — bitwise equivalent for u8-backed
    // poly lanes, presumably a generator quirk.
    unsafe {
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut b: poly8x8x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Pad the 24-byte table to 32 bytes with zeros for a two-register TBX.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // Select keeps `a` for c >= 24, where the zero padding would
        // otherwise yield 0 rather than preserving `a`.
        let ret_val: poly8x8_t = transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // Four 64-bit tables pack exactly into two 128-bit registers; TBX
    // already preserves `a` for out-of-range indices, so no select needed.
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Four 64-bit tables pack exactly into two 128-bit registers; TBX
    // already preserves `a` for out-of-range indices, so no select needed.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut b: uint8x8x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Four 64-bit tables fit exactly in two 128-bit registers.
        let ret_val: uint8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Four 64-bit tables pack exactly into two 128-bit registers; TBX
    // already preserves `a` for out-of-range indices, so no select needed.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian lane order is reversed, so reverse every input, run the
    // little-endian lookup, and reverse the result back.
    let mut b: poly8x8x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Four 64-bit tables fit exactly in two 128-bit registers.
        let ret_val: poly8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // TRN1: interleave even-numbered lanes of a and b -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes, TRN1 is the same permutation as ZIP1
    // ([a0, b0]), hence the zip1 codegen check.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Two-lane TRN1 degenerates to ZIP1: [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Two-lane TRN1 degenerates to ZIP1: [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Two-lane TRN1 degenerates to ZIP1: [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Two-lane TRN1 degenerates to ZIP1: [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Two-lane TRN1 degenerates to ZIP1: [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Two-lane TRN1 degenerates to ZIP1: [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN1: interleave even-numbered lanes of a and b -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN1: interleave the even-numbered lanes of a and b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN1: interleave even-numbered lanes of a and b -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN1: interleave even-numbered lanes of a and b -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // TRN1: interleave the even-numbered lanes of a and b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN1: interleave even-numbered lanes of a and b -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN1: interleave even-numbered lanes of a and b -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // TRN1: interleave the even-numbered lanes of a and b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN1: interleave even-numbered lanes of a and b -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
27767#[doc = "Transpose vectors"]
27768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
27769#[inline]
27770#[target_feature(enable = "neon,fp16")]
27771#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
27772#[cfg(not(target_arch = "arm64ec"))]
27773#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27774pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
27775    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27776}
27777#[doc = "Transpose vectors"]
27778#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
27779#[inline]
27780#[target_feature(enable = "neon,fp16")]
27781#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
27782#[cfg(not(target_arch = "arm64ec"))]
27783#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27784pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
27785    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
27786}
27787#[doc = "Transpose vectors"]
27788#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
27789#[inline]
27790#[target_feature(enable = "neon")]
27791#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27792#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27793pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
27794    unsafe { simd_shuffle!(a, b, [1, 3]) }
27795}
27796#[doc = "Transpose vectors"]
27797#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
27798#[inline]
27799#[target_feature(enable = "neon")]
27800#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27801#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27802pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
27803    unsafe { simd_shuffle!(a, b, [1, 3]) }
27804}
27805#[doc = "Transpose vectors"]
27806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
27807#[inline]
27808#[target_feature(enable = "neon")]
27809#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27810#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27811pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
27812    unsafe { simd_shuffle!(a, b, [1, 3]) }
27813}
27814#[doc = "Transpose vectors"]
27815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
27816#[inline]
27817#[target_feature(enable = "neon")]
27818#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27819#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27820pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
27821    unsafe { simd_shuffle!(a, b, [1, 3]) }
27822}
27823#[doc = "Transpose vectors"]
27824#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
27825#[inline]
27826#[target_feature(enable = "neon")]
27827#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27828#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27829pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
27830    unsafe { simd_shuffle!(a, b, [1, 3]) }
27831}
27832#[doc = "Transpose vectors"]
27833#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
27834#[inline]
27835#[target_feature(enable = "neon")]
27836#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27837#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27838pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
27839    unsafe { simd_shuffle!(a, b, [1, 3]) }
27840}
27841#[doc = "Transpose vectors"]
27842#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
27843#[inline]
27844#[target_feature(enable = "neon")]
27845#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27846#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
27847pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
27848    unsafe { simd_shuffle!(a, b, [1, 3]) }
27849}
27850#[doc = "Transpose vectors"]
27851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
27852#[inline]
27853#[target_feature(enable = "neon")]
27854#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27855#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27856pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
27857    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27858}
27859#[doc = "Transpose vectors"]
27860#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
27861#[inline]
27862#[target_feature(enable = "neon")]
27863#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27864#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27865pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
27866    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
27867}
27868#[doc = "Transpose vectors"]
27869#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
27870#[inline]
27871#[target_feature(enable = "neon")]
27872#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27873#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27874pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
27875    unsafe {
27876        simd_shuffle!(
27877            a,
27878            b,
27879            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
27880        )
27881    }
27882}
27883#[doc = "Transpose vectors"]
27884#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
27885#[inline]
27886#[target_feature(enable = "neon")]
27887#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27888#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27889pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
27890    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27891}
27892#[doc = "Transpose vectors"]
27893#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
27894#[inline]
27895#[target_feature(enable = "neon")]
27896#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27897#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27898pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
27899    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
27900}
27901#[doc = "Transpose vectors"]
27902#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
27903#[inline]
27904#[target_feature(enable = "neon")]
27905#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27906#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27907pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
27908    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27909}
27910#[doc = "Transpose vectors"]
27911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
27912#[inline]
27913#[target_feature(enable = "neon")]
27914#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27915#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27916pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
27917    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
27918}
27919#[doc = "Transpose vectors"]
27920#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
27921#[inline]
27922#[target_feature(enable = "neon")]
27923#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27924#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27925pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
27926    unsafe {
27927        simd_shuffle!(
27928            a,
27929            b,
27930            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
27931        )
27932    }
27933}
27934#[doc = "Transpose vectors"]
27935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
27936#[inline]
27937#[target_feature(enable = "neon")]
27938#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27939#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27940pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
27941    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27942}
27943#[doc = "Transpose vectors"]
27944#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
27945#[inline]
27946#[target_feature(enable = "neon")]
27947#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27948#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27949pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
27950    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
27951}
27952#[doc = "Transpose vectors"]
27953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
27954#[inline]
27955#[target_feature(enable = "neon")]
27956#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27957#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27958pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
27959    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27960}
27961#[doc = "Transpose vectors"]
27962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
27963#[inline]
27964#[target_feature(enable = "neon")]
27965#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27966#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27967pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
27968    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
27969}
27970#[doc = "Transpose vectors"]
27971#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
27972#[inline]
27973#[target_feature(enable = "neon")]
27974#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27975#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27976pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
27977    unsafe {
27978        simd_shuffle!(
27979            a,
27980            b,
27981            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
27982        )
27983    }
27984}
27985#[doc = "Transpose vectors"]
27986#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
27987#[inline]
27988#[target_feature(enable = "neon")]
27989#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27990#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27991pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
27992    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27993}
27994#[doc = "Transpose vectors"]
27995#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
27996#[inline]
27997#[target_feature(enable = "neon")]
27998#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27999#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28000pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28001    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28002}
// --- Bitwise test (CMTST) ---
// Each vector variant computes `a & b` lane-wise and returns an all-ones
// mask in every lane where the result is nonzero (via `simd_ne` against a
// zero vector), all-zeros otherwise.
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe {
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe {
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
// Scalar forms: route the i64/u64 operands through the 1-lane vector
// variant via `transmute` and transmute the single-lane mask back out.
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
// --- Signed saturating accumulate of unsigned value (SUQADD) ---
// Each variant binds directly to the corresponding LLVM intrinsic
// (`llvm.aarch64.neon.suqadd.*`) through a declaration-only
// `unsafe extern "unadjusted"` block; the link_name suffix encodes the
// vector shape (e.g. `v8i8` for int8x8_t).
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    unsafe { _vuqaddq_s64(a, b) }
}
// Scalar b/h forms: no dedicated LLVM scalar intrinsic is used here;
// instead the inputs are splatted to full vectors, the vector variant is
// applied, and lane 0 of the result is extracted.
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
// Scalar d/s forms: these do bind scalar LLVM intrinsics (`suqadd.i64` /
// `suqadd.i32`) directly.
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    unsafe { _vuqadds_s32(a, b) }
}
// --- Mixed-sign dot product, indexed (USDOT, lane form) ---
// `c` is reinterpreted as four 32-bit lanes (each one group of four i8s);
// LANE selects which 32-bit group is broadcast before the dot product,
// hence the 2-bit lane assert even though `c` has 16 byte lanes.
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x4_t = vreinterpretq_s32_s8(c);
    unsafe {
        // Broadcast the selected 32-bit group to both lanes, then view it
        // as bytes again for the non-indexed USDOT.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, b, vreinterpret_s8_s32(c))
    }
}
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    let c: int32x4_t = vreinterpretq_s32_s8(c);
    unsafe {
        // Broadcast the selected 32-bit group to all four lanes.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, b, vreinterpretq_s8_s32(c))
    }
}
// --- Vector unzip, part 1 (UZP1) ---
// UZP1 gathers the even-numbered elements of the concatenation a:b, i.e.
// shuffle indices [0, 2, 4, ...]; indices >= n (the lane count of one
// input) select from `b`. The 2-lane variants assert `zip1` because for
// 2 lanes UZP1 and ZIP1 are the same permutation ([0, 2]), matching the
// instruction presumably selected by LLVM.
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
28451#[doc = "Unzip vectors"]
28452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
28453#[inline]
28454#[target_feature(enable = "neon")]
28455#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28456#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28457pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
28458    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28459}
28460#[doc = "Unzip vectors"]
28461#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
28462#[inline]
28463#[target_feature(enable = "neon")]
28464#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28465#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28466pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
28467    unsafe {
28468        simd_shuffle!(
28469            a,
28470            b,
28471            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
28472        )
28473    }
28474}
28475#[doc = "Unzip vectors"]
28476#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
28477#[inline]
28478#[target_feature(enable = "neon")]
28479#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28480#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28481pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
28482    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28483}
28484#[doc = "Unzip vectors"]
28485#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
28486#[inline]
28487#[target_feature(enable = "neon")]
28488#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28489#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28490pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
28491    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28492}
28493#[doc = "Unzip vectors"]
28494#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
28495#[inline]
28496#[target_feature(enable = "neon")]
28497#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28498#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28499pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
28500    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28501}
28502#[doc = "Unzip vectors"]
28503#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
28504#[inline]
28505#[target_feature(enable = "neon")]
28506#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28507#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28508pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
28509    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28510}
28511#[doc = "Unzip vectors"]
28512#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
28513#[inline]
28514#[target_feature(enable = "neon")]
28515#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28516#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28517pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
28518    unsafe {
28519        simd_shuffle!(
28520            a,
28521            b,
28522            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
28523        )
28524    }
28525}
28526#[doc = "Unzip vectors"]
28527#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
28528#[inline]
28529#[target_feature(enable = "neon")]
28530#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28531#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28532pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
28533    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28534}
28535#[doc = "Unzip vectors"]
28536#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
28537#[inline]
28538#[target_feature(enable = "neon")]
28539#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28540#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28541pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28542    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28543}
// --- UZP2 ("unzip odd") intrinsics ------------------------------------------
// Each function selects the odd-numbered lanes of the logical concatenation
// a:b via `simd_shuffle!`. For 2-lane inputs this is the same data movement
// as ZIP2, which is why those variants assert `zip2` instead of `uzp2`.
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Degenerate 2-lane case: [a[1], b[1]] (same movement as ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Odd lanes of the 32-lane concatenation a:b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Odd lanes of the 32-lane concatenation a:b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Odd lanes of the 32-lane concatenation a:b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // The rotate amount is a 6-bit unsigned immediate (0..=63), enforced at
    // compile time before the intrinsic is reached.
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    // SAFETY: delegates to the LLVM `llvm.aarch64.crypto.xar` intrinsic; the
    // required `sha3` feature is enabled by the `target_feature` attribute.
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
// --- ZIP1 ("zip low halves") intrinsics -------------------------------------
// Each function interleaves the low halves of a and b:
// result[2k] = a[k], result[2k+1] = b[k] (shuffle indices count into the
// concatenation a:b). For 2-lane vectors this reduces to [a[0], b[0]].
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // 2-lane case: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Interleave a[0..8] with b[0..8] (b lanes are indices 16..).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Interleave a[0..8] with b[0..8] (b lanes are indices 16..).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Interleave a[0..8] with b[0..8] (b lanes are indices 16..).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
29034#[doc = "Zip vectors"]
29035#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
29036#[inline]
29037#[target_feature(enable = "neon,fp16")]
29038#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
29039#[cfg(not(target_arch = "arm64ec"))]
29040#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
29041pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
29042    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
29043}
29044#[doc = "Zip vectors"]
29045#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
29046#[inline]
29047#[target_feature(enable = "neon,fp16")]
29048#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
29049#[cfg(not(target_arch = "arm64ec"))]
29050#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
29051pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
29052    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
29053}
29054#[doc = "Zip vectors"]
29055#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
29056#[inline]
29057#[target_feature(enable = "neon")]
29058#[stable(feature = "neon_intrinsics", since = "1.59.0")]
29059#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
29060pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
29061    unsafe { simd_shuffle!(a, b, [1, 3]) }
29062}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // SAFETY: constant indices [2, 6, 3, 7] are in bounds for the 8-lane
    // concatenation of `a` and `b`; interleaves the upper halves
    // (a[2], b[2], a[3], b[3]), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: constant indices [1, 3] are in bounds for the 4-lane
    // concatenation of `a` and `b`; selects a[1], b[1] (ZIP2 semantics).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SAFETY: constant indices are in bounds for the 16-lane concatenation
    // of `a` and `b`; interleaves lanes 4..8 of `a` with lanes 4..8 of `b`
    // (upper halves), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // SAFETY: constant indices are in bounds for the 32-lane concatenation
    // of `a` and `b`; interleaves lanes 8..16 of `a` with lanes 8..16 of
    // `b` (upper halves), matching ZIP2.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // SAFETY: constant indices [2, 6, 3, 7] are in bounds for the 8-lane
    // concatenation of `a` and `b`; interleaves the upper halves
    // (a[2], b[2], a[3], b[3]), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // SAFETY: constant indices are in bounds for the 16-lane concatenation
    // of `a` and `b`; interleaves lanes 4..8 of `a` with lanes 4..8 of `b`
    // (upper halves), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // SAFETY: constant indices [1, 3] are in bounds for the 4-lane
    // concatenation of `a` and `b`; selects a[1], b[1] (ZIP2 semantics).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SAFETY: constant indices [2, 6, 3, 7] are in bounds for the 8-lane
    // concatenation of `a` and `b`; interleaves the upper halves
    // (a[2], b[2], a[3], b[3]), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // SAFETY: constant indices [1, 3] are in bounds for the 4-lane
    // concatenation of `a` and `b`; selects a[1], b[1] (ZIP2 semantics).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: constant indices are in bounds for the 16-lane concatenation
    // of `a` and `b`; interleaves lanes 4..8 of `a` with lanes 4..8 of `b`
    // (upper halves), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: constant indices are in bounds for the 32-lane concatenation
    // of `a` and `b`; interleaves lanes 8..16 of `a` with lanes 8..16 of
    // `b` (upper halves), matching ZIP2.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // SAFETY: constant indices [2, 6, 3, 7] are in bounds for the 8-lane
    // concatenation of `a` and `b`; interleaves the upper halves
    // (a[2], b[2], a[3], b[3]), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // SAFETY: constant indices are in bounds for the 16-lane concatenation
    // of `a` and `b`; interleaves lanes 4..8 of `a` with lanes 4..8 of `b`
    // (upper halves), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // SAFETY: constant indices [1, 3] are in bounds for the 4-lane
    // concatenation of `a` and `b`; selects a[1], b[1] (ZIP2 semantics).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // SAFETY: constant indices [2, 6, 3, 7] are in bounds for the 8-lane
    // concatenation of `a` and `b`; interleaves the upper halves
    // (a[2], b[2], a[3], b[3]), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: constant indices [1, 3] are in bounds for the 4-lane
    // concatenation of `a` and `b`; selects a[1], b[1] (ZIP2 semantics).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // SAFETY: constant indices are in bounds for the 16-lane concatenation
    // of `a` and `b`; interleaves lanes 4..8 of `a` with lanes 4..8 of `b`
    // (upper halves), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // SAFETY: constant indices are in bounds for the 32-lane concatenation
    // of `a` and `b`; interleaves lanes 8..16 of `a` with lanes 8..16 of
    // `b` (upper halves), matching ZIP2.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // SAFETY: constant indices [2, 6, 3, 7] are in bounds for the 8-lane
    // concatenation of `a` and `b`; interleaves the upper halves
    // (a[2], b[2], a[3], b[3]), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // SAFETY: constant indices are in bounds for the 16-lane concatenation
    // of `a` and `b`; interleaves lanes 4..8 of `a` with lanes 4..8 of `b`
    // (upper halves), matching ZIP2.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // SAFETY: constant indices [1, 3] are in bounds for the 4-lane
    // concatenation of `a` and `b`; selects a[1], b[1] (ZIP2 semantics).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}