core/stdarch/crates/core_arch/src/aarch64/neon/
generated.rs

1// This code is automatically generated. DO NOT MODIFY.
2//
3// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
4//
5// ```
6// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
7// ```
8#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // Thin binding to the LLVM intrinsic; the "unadjusted" ABI passes the
    // arguments exactly as declared, without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    // SAFETY: signature matches the intrinsic; the required `crc` target
    // feature is enforced by the #[target_feature] attribute on this fn.
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // Thin binding to the LLVM intrinsic (see __crc32cd for the ABI note).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    // SAFETY: signature matches the intrinsic; `crc` feature enforced above.
    unsafe { ___crc32d(crc, data) }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..=15) of both 16-lane inputs.
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        // The absolute difference is non-negative, so reinterpret as unsigned
        // and zero-extend before accumulating into the 16-bit lanes.
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..=7) of both 8-lane inputs.
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        // Non-negative difference: reinterpret unsigned, zero-extend, accumulate.
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..=3) of both 4-lane inputs.
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        // Non-negative difference: reinterpret unsigned, zero-extend, accumulate.
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // High halves (lanes 8..=15), abs-diff, zero-extend, accumulate.
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // High halves (lanes 4..=7), abs-diff, zero-extend, accumulate.
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // High halves (lanes 2..=3), abs-diff, zero-extend, accumulate.
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to the LLVM `fabd` intrinsic for a 1-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM `fabd` intrinsic for a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: broadcast both scalars into 1-lane vectors, run the vector
    // abs-diff, and pull lane 0 back out.
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar form: broadcast, vector abs-diff, extract lane 0.
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 form: broadcast, vector abs-diff, extract lane 0.
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // High halves (lanes 4..=7), abs-diff, then zero-extend: the
        // difference is non-negative, so it is widened via unsigned cast.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // High halves (lanes 2..=3), abs-diff, zero-extend to 64-bit lanes.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // High halves (lanes 8..=15), abs-diff, zero-extend to 16-bit lanes.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // High halves (lanes 8..=15), abs-diff, zero-extend to 16-bit lanes.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // High halves (lanes 4..=7), abs-diff, zero-extend to 32-bit lanes.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // High halves (lanes 2..=3), abs-diff, zero-extend to 64-bit lanes.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise |x| via the generic SIMD fabs; lowers to a single `fabs`.
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise |x| via the generic SIMD fabs; lowers to a single `fabs`.
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe {
        // abs(x) = max(x, -x), built from negate + compare + select so the
        // i64::MIN lane wraps (simd_neg(MIN) == MIN) instead of trapping.
        let neg: int64x1_t = simd_neg(a);
        let mask: int64x1_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe {
        // abs(x) = max(x, -x) via negate + compare + select; wrapping on
        // i64::MIN lanes (see vabs_s64).
        let neg: int64x2_t = simd_neg(a);
        let mask: int64x2_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    // Scalar form bound to the LLVM scalar-i64 abs intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vabsd_s64(a) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain wrapping scalar add; needs no dedicated instruction (hence the
    // `nop` codegen assertion).
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Plain wrapping scalar add; no dedicated instruction needed.
    a.wrapping_add(b)
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Widening horizontal sum: all four i16 lanes summed into one i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // Widening horizontal sum: all eight i16 lanes summed into one i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // Widening horizontal sum: all four i32 lanes summed into one i64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Note: the 2-lane widening sum is expected to lower to the pairwise
// `saddlp` instruction rather than `saddlv` (per the assertion below).
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // LLVM only exposes an i32-returning form; the sum of eight i8 values is
    // within [-1024, 1016], so the truncating cast to i16 below is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // LLVM only exposes an i32-returning form; the sum of sixteen i8 values
    // is within [-2048, 2032], so the cast to i16 below is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Widening horizontal sum: all four u16 lanes summed into one u32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // Widening horizontal sum: all eight u16 lanes summed into one u32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // Widening horizontal sum: all four u32 lanes summed into one u64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Note: the 2-lane widening sum is expected to lower to the pairwise
// `uaddlp` instruction rather than `uaddlv` (per the assertion below).
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // LLVM only exposes an i32-returning form; the sum of eight u8 values is
    // at most 2040, so the cast to u16 below is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // LLVM only exposes an i32-returning form; the sum of sixteen u8 values
    // is at most 4080, so the cast to u16 below is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    // Horizontal float sum via the dedicated LLVM intrinsic (pairwise adds),
    // keeping the hardware's summation order rather than a generic reduction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    // Horizontal float sum via the dedicated LLVM intrinsic (pairwise adds).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    // Horizontal float sum via the dedicated LLVM intrinsic (pairwise add).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: signature matches the intrinsic; `neon` feature enforced above.
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    // Wrapping horizontal sum of both lanes; integer addition is associative,
    // so the "unordered" reduction is exact.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    // Wrapping horizontal sum of both lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    // Wrapping horizontal sum of all lanes; exact for integers.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // Wrapping sum of the two 64-bit lanes (lowers to a pairwise `addp`).
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    // Wrapping sum of the two 64-bit lanes (lowers to a pairwise `addp`).
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Binding to the FEAT_FAMINMAX `famax` intrinsic (2-lane f32 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: signature matches the intrinsic; required features enforced above.
    unsafe { _vamax_f32(a, b) }
}
737#[doc = "Multi-vector floating-point absolute maximum"]
738#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
739#[inline]
740#[target_feature(enable = "neon,faminmax")]
741#[cfg_attr(test, assert_instr(nop))]
742#[unstable(feature = "faminmax", issue = "137933")]
743pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
744    unsafe extern "unadjusted" {
745        #[cfg_attr(
746            any(target_arch = "aarch64", target_arch = "arm64ec"),
747            link_name = "llvm.aarch64.neon.famax.v4f32"
748        )]
749        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
750    }
751    unsafe { _vamaxq_f32(a, b) }
752}
753#[doc = "Multi-vector floating-point absolute maximum"]
754#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
755#[inline]
756#[target_feature(enable = "neon,faminmax")]
757#[cfg_attr(test, assert_instr(nop))]
758#[unstable(feature = "faminmax", issue = "137933")]
759pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
760    unsafe extern "unadjusted" {
761        #[cfg_attr(
762            any(target_arch = "aarch64", target_arch = "arm64ec"),
763            link_name = "llvm.aarch64.neon.famax.v2f64"
764        )]
765        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
766    }
767    unsafe { _vamaxq_f64(a, b) }
768}
769#[doc = "Multi-vector floating-point absolute minimum"]
770#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
771#[inline]
772#[target_feature(enable = "neon,faminmax")]
773#[cfg_attr(test, assert_instr(nop))]
774#[unstable(feature = "faminmax", issue = "137933")]
775pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
776    unsafe extern "unadjusted" {
777        #[cfg_attr(
778            any(target_arch = "aarch64", target_arch = "arm64ec"),
779            link_name = "llvm.aarch64.neon.famin.v2f32"
780        )]
781        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
782    }
783    unsafe { _vamin_f32(a, b) }
784}
785#[doc = "Multi-vector floating-point absolute minimum"]
786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
787#[inline]
788#[target_feature(enable = "neon,faminmax")]
789#[cfg_attr(test, assert_instr(nop))]
790#[unstable(feature = "faminmax", issue = "137933")]
791pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
792    unsafe extern "unadjusted" {
793        #[cfg_attr(
794            any(target_arch = "aarch64", target_arch = "arm64ec"),
795            link_name = "llvm.aarch64.neon.famin.v4f32"
796        )]
797        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
798    }
799    unsafe { _vaminq_f32(a, b) }
800}
801#[doc = "Multi-vector floating-point absolute minimum"]
802#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
803#[inline]
804#[target_feature(enable = "neon,faminmax")]
805#[cfg_attr(test, assert_instr(nop))]
806#[unstable(feature = "faminmax", issue = "137933")]
807pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
808    unsafe extern "unadjusted" {
809        #[cfg_attr(
810            any(target_arch = "aarch64", target_arch = "arm64ec"),
811            link_name = "llvm.aarch64.neon.famin.v2f64"
812        )]
813        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
814    }
815    unsafe { _vaminq_f64(a, b) }
816}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // BCAX (part of the SHA3 extension) is a pure bitwise op; the signed
    // (`bcaxs`) and unsigned (`bcaxu`) intrinsic spellings differ only in
    // element type. See the Arm docs linked above for the exact formula.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the call matches the declared signature, and the required
    // `neon,sha3` features are enabled on this function.
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Same pattern as vbcaxq_s8 with 16-bit lanes (v8i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Same pattern as vbcaxq_s8 with 32-bit lanes (v4i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Same pattern as vbcaxq_s8 with 64-bit lanes (v2i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Unsigned variant (`bcaxu`) of vbcaxq_s8; same bitwise operation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Same pattern as vbcaxq_u8 with 16-bit lanes (v8i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Same pattern as vbcaxq_u8 with 32-bit lanes (v4i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Same pattern as vbcaxq_u8 with 64-bit lanes (v2i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // FCADD with a 270-degree rotation applied to `b` (see the Arm docs
    // linked above for the complex-number lane pairing). The f16 variants
    // additionally need `fp16` and are compiled out on arm64ec.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the call matches the declared signature, and the required
    // target features (`fcma`, `fp16`) are enabled on this function.
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Same pattern as vcadd_rot270_f16 at 128-bit width (v8f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // f32 variant: only `neon,fcma` required (no fp16 dependency).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Same pattern as vcadd_rot270_f32 at 128-bit width (v4f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // f64 variant (one complex element per 128-bit vector, v2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // 90-degree rotation counterpart of vcadd_rot270_f16; only the
    // intrinsic name differs.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Same pattern as vcadd_rot90_f16 at 128-bit width (v8f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // 90-degree rotation counterpart of vcadd_rot270_f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Same pattern as vcadd_rot90_f32 at 128-bit width (v4f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 90-degree rotation counterpart of vcaddq_rot270_f64 (v2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: signature matches the declaration; required features enabled.
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // FACGE: per-lane |a| >= |b|, yielding an unsigned mask vector.
    // Note the intrinsic carries both result and operand types
    // (v1i64.v1f64) because input and output element types differ.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the call matches the declared signature, and `neon` is
    // enabled on this function.
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Same pattern as vcage_f64 at 128-bit width (v2i64.v2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: signature matches the declaration; `neon` enabled above.
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    // Scalar ("d" suffix) form: operates directly on f64, returning the
    // comparison mask as a u64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: signature matches the declaration; `neon` enabled above.
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    // Scalar ("s" suffix) f32 form of vcaged_f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: signature matches the declaration; `neon` enabled above.
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    // Scalar ("h" suffix) f16 form. The LLVM intrinsic returns its mask
    // widened to i32, so the result is narrowed back to u16 below
    // (presumably an all-ones/all-zeros mask — see the Arm docs above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: signature matches the declaration; `neon,fp16` enabled above.
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // FACGT: strict counterpart of vcage_f64 — per-lane |a| > |b| mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the call matches the declared signature, and `neon` is
    // enabled on this function.
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Same pattern as vcagt_f64 at 128-bit width (v2i64.v2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: signature matches the declaration; `neon` enabled above.
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    // Scalar ("d" suffix) form, returning the mask as a u64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: signature matches the declaration; `neon` enabled above.
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    // Scalar ("s" suffix) f32 form of vcagtd_f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: signature matches the declaration; `neon` enabled above.
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    // Scalar ("h" suffix) f16 form; LLVM returns the mask widened to i32,
    // narrowed back to u16 below (see vcageh_f16 for the same pattern).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: signature matches the declaration; `neon,fp16` enabled above.
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is computed as |b| >= |a| by swapping the operands to the
    // FACGE wrapper — there is no dedicated FACLE instruction, hence the
    // `assert_instr(facge)` above.
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Swapped-operand FACGE; see vcale_f64.
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // Scalar form; swapped-operand FACGE (see vcale_f64).
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form; swapped-operand FACGE (see vcale_f64).
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form; swapped-operand FACGE (see vcale_f64).
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is computed as |b| > |a| by swapping the operands to the
    // FACGT wrapper — there is no dedicated FACLT instruction, hence the
    // `assert_instr(facgt)` above.
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Swapped-operand FACGT; see vcalt_f64.
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    // Scalar form; swapped-operand FACGT (see vcalt_f64).
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form; swapped-operand FACGT (see vcalt_f64).
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form; swapped-operand FACGT (see vcalt_f64).
    vcagth_f16(b, a)
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // `simd_eq` produces a per-lane mask of the unsigned result type.
    // SAFETY: operand and result vectors have matching lane counts, and
    // `neon` is enabled on this function.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: same reasoning as vceq_f64, at 128-bit width.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Integer lane compare; lowers to CMEQ per the assert_instr above.
    // SAFETY: matching lane counts; `neon` enabled above.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: same reasoning as vceq_s64, at 128-bit width.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: same reasoning as vceq_s64, for unsigned lanes.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: same reasoning as vceq_u64, at 128-bit width.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // Polynomial lanes compare bitwise like integers.
    // SAFETY: matching lane counts; `neon` enabled above.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // SAFETY: same reasoning as vceq_p64, at 128-bit width.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: duplicate both scalars into 1-lane vectors, run the vector
    // compare, then extract lane 0 as the u64 mask (all ones / all zeros).
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    // Same dup/compare/extract pattern for the f32 scalar, yielding a u32 mask.
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    // Scalar form routed through the 1-lane vector compare; transmute converts
    // the i64 scalars to int64x1_t and the single result lane back to u64.
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    // Unsigned scalar variant of the same transmute round-trip.
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    // Half-precision scalar compare via dup-to-vector / compare / extract lane 0.
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    // Build an all-zero constant vector, then compare each lane of `a` to it;
    // the transmute maps the portable f16x4 type onto float16x4_t.
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    // 8-lane (128-bit) variant of the compare-against-zero mask.
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane of `a` against an inline all-zero vector (FCMEQ #0).
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    // 4-lane (128-bit) variant of the compare-against-zero mask.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a scalar 0.0 transmutes directly to float64x1_t.
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) variant of the compare-against-zero mask.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    // Each output lane is all ones when the matching lane of `a` is zero
    // (CMEQ against #0), all zeros otherwise.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    // 16-lane (128-bit) variant.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant of the zero-compare mask.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    // 8-lane (128-bit) i16 variant.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant of the zero-compare mask.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    // 4-lane (128-bit) i32 variant.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    // Single-lane i64 variant of the zero-compare mask.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) i64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Polynomial compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    // Polynomial lanes compare bit-for-bit, so an i8 zero vector is reused via
    // transmute; each output lane is all ones when the `a` lane is zero.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Polynomial compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    // 16-lane (128-bit) polynomial variant.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Polynomial compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    // Single-lane p64 variant of the zero-compare mask.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Polynomial compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) p64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    // Each output lane is all ones when the matching lane of `a` is zero
    // (CMEQ against #0), all zeros otherwise.
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    // 16-lane (128-bit) variant.
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    // 4-lane u16 variant of the zero-compare mask.
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    // 8-lane (128-bit) u16 variant.
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    // 2-lane u32 variant of the zero-compare mask.
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    // 4-lane (128-bit) u32 variant.
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    // Single-lane u64 variant of the zero-compare mask.
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) u64 variant.
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    // Scalar form routed through the 1-lane vector zero-compare; transmute
    // converts the scalar in and the single mask lane back out.
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    // Unsigned scalar variant of the same transmute round-trip.
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzh_f16(a: f16) -> u16 {
    // Duplicate the scalar into a vector, zero-compare, extract lane 0 as the mask.
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    // Same dup/compare/extract pattern for the f32 scalar.
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    // Same dup/compare/extract pattern for the f64 scalar.
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise a >= b mask: all ones when true, all zeros when false (FCMGE).
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) variant of the same mask.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise a >= b mask; lowers to CMGE.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) signed variant.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise a >= b mask; the type drives lowering to CMHS
    // (unsigned higher-or-same) instead of CMGE.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) unsigned variant.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    // Scalar form: duplicate both scalars, run the vector compare, extract
    // lane 0 as the u64 mask (all ones / all zeros).
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    // Same dup/compare/extract pattern for the f32 scalar.
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    // Scalar form routed through the 1-lane vector compare via transmute.
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    // Unsigned scalar variant of the same transmute round-trip.
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    // Half-precision scalar compare via dup-to-vector / compare / extract lane 0.
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    // Lane-wise a >= 0.0 mask against an inline all-zero vector (FCMGE #0).
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    // 4-lane (128-bit) variant.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a scalar 0.0 transmutes directly to float64x1_t.
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) f64 variant.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise a >= 0 mask (CMGE against #0): all ones for
    // non-negative lanes, all zeros for negative ones.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    // 16-lane (128-bit) variant.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant of the >= 0 mask.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    // 8-lane (128-bit) i16 variant.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant of the >= 0 mask.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    // 4-lane (128-bit) i32 variant.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    // Single-lane i64 variant of the >= 0 mask.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) i64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    // Duplicate the scalar into a vector, compare against zero, extract lane 0.
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    // Same dup/compare/extract pattern for the f32 scalar.
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    // Scalar form routed through the 1-lane vector compare via transmute.
    // assert_instr(nop): the compiler is free to fold this to pure scalar code,
    // so no specific instruction is asserted.
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgezh_f16(a: f16) -> u16 {
    // Half-precision scalar variant of the dup/compare/extract pattern.
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise a > b mask: all ones when true, all zeros when false (FCMGT).
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) variant of the same mask.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise a > b mask; lowers to CMGT.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) signed variant.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise a > b mask; the type drives lowering to CMHI
    // (unsigned higher) instead of CMGT.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 2-lane (128-bit) unsigned variant.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: splat both operands into 1-lane vectors, compare, then
    // extract lane 0 (u64::MAX if a > b, else 0).
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form: splat, compare via the vector intrinsic, extract lane 0.
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: reinterpret each i64 as a 1-lane vector, compare, then
    // reinterpret the 1-lane mask back to u64 (all-ones or zero).
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    // Unsigned scalar form: same reinterpret/compare/reinterpret pattern as vcgtd_s64.
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form (requires fp16): splat, compare, extract lane 0.
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane against a zero vector; `transmute` reinterprets the
    // internal `f32x2` wrapper as the public `float32x2_t` type.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit (q) variant: four lanes compared against zero.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    // A single f64 zero reinterpreted as the 1-lane vector type.
    let b: f64 = 0.0;
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (q) variant: two f64 lanes compared against zero.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    // Signed `> 0` per lane; result lanes are all-ones or all-zeros.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    // 128-bit (q) variant of vcgtz_s8 (16 lanes).
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    // Signed 16-bit `> 0` per lane.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    // 128-bit (q) variant of vcgtz_s16 (8 lanes).
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    // Signed 32-bit `> 0` per lane.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    // 128-bit (q) variant of vcgtz_s32 (4 lanes).
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    // Signed 64-bit `> 0` on the single lane.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vcgtz_s64 (2 lanes).
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    // Scalar form: splat into a 1-lane vector, compare against zero, extract lane 0.
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    // Scalar f32 form: splat, compare against zero, extract lane 0.
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    // Scalar form: reinterpret the i64 as a 1-lane vector, compare against
    // zero, then reinterpret the 1-lane mask back to u64.
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgtzh_f16(a: f16) -> u16 {
    // Scalar f16 form (requires fp16): splat, compare against zero, extract lane 0.
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise `a <= b`; note the assert_instr: LE is lowered as FCMGE with
    // the operands swapped (there is no dedicated FCMLE-by-register form).
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vcle_f64.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise `a <= b`; lowered as CMGE with swapped operands.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vcle_s64.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise `a <= b`; lowered as CMHS (unsigned higher-or-same).
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vcle_u64.
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    // Scalar form: splat both operands into 1-lane vectors, compare, then
    // extract lane 0 (u64::MAX if a <= b, else 0).
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form: splat, compare via the vector intrinsic, extract lane 0.
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    // Unsigned scalar form: reinterpret scalars as 1-lane vectors, compare,
    // reinterpret the 1-lane mask back to u64.
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    // Signed scalar form: same reinterpret/compare/reinterpret pattern as vcled_u64.
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form (requires fp16): splat, compare, extract lane 0.
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane `<= 0`; zero-compare has a dedicated FCMLE form.
    // `transmute` reinterprets the internal `f32x2` wrapper as `float32x2_t`.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit (q) variant: four lanes compared `<= 0`.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    // A single f64 zero reinterpreted as the 1-lane vector type.
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (q) variant: two f64 lanes compared `<= 0`.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    // Signed `<= 0` per lane; result lanes are all-ones or all-zeros.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    // 128-bit (q) variant of vclez_s8 (16 lanes).
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    // Signed 16-bit `<= 0` per lane.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    // 128-bit (q) variant of vclez_s16 (8 lanes).
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    // Signed 32-bit `<= 0` per lane.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    // 128-bit (q) variant of vclez_s32 (4 lanes).
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    // Signed 64-bit `<= 0` on the single lane.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vclez_s64 (2 lanes).
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    // Scalar form: splat into a 1-lane vector, compare `<= 0`, extract lane 0.
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    // Scalar f32 form: splat, compare `<= 0`, extract lane 0.
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    // Scalar form: reinterpret the i64 as a 1-lane vector, compare `<= 0`,
    // then reinterpret the 1-lane mask back to u64.
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclezh_f16(a: f16) -> u16 {
    // Scalar f16 form (requires fp16): splat, compare `<= 0`, extract lane 0.
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise `a < b`; note the assert_instr: LT is lowered as FCMGT with
    // the operands swapped (there is no dedicated FCMLT-by-register form).
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vclt_f64.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise `a < b`; lowered as CMGT with swapped operands.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vclt_s64.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise `a < b`; lowered as CMHI with swapped operands.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vclt_u64.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_u64(a: u64, b: u64) -> u64 {
    // Unsigned scalar form: reinterpret scalars as 1-lane vectors, compare,
    // reinterpret the 1-lane mask back to u64 (all-ones or zero).
    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    // Signed scalar form: same reinterpret/compare/reinterpret pattern as vcltd_u64.
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form (requires fp16): splat, compare, extract lane 0.
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form: splat, compare via the vector intrinsic, extract lane 0.
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    // Scalar f64 form: splat both operands, compare, extract lane 0
    // (u64::MAX if a < b, else 0).
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane `< 0`; zero-compare has a dedicated FCMLT form.
    // `transmute` reinterprets the internal `f32x2` wrapper as `float32x2_t`.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit (q) variant: four lanes compared `< 0`.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    // A single f64 zero reinterpreted as the 1-lane vector type.
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (q) variant: two f64 lanes compared `< 0`.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    // Signed `< 0` per lane; result lanes are all-ones or all-zeros.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    // 128-bit (q) variant of vcltz_s8 (16 lanes).
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    // Signed 16-bit `< 0` per lane.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    // 128-bit (q) variant of vcltz_s16 (8 lanes).
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    // Signed 32-bit `< 0` per lane.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    // 128-bit (q) variant of vcltz_s32 (4 lanes).
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    // Signed 64-bit `< 0` on the single lane.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    // 128-bit (q) variant of vcltz_s64 (2 lanes).
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
2784#[doc = "Floating-point compare less than zero"]
2785#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
2786#[inline]
2787#[target_feature(enable = "neon")]
2788#[cfg_attr(test, assert_instr(fcmp))]
2789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2790pub fn vcltzd_f64(a: f64) -> u64 {
2791    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
2792}
2793#[doc = "Floating-point compare less than zero"]
2794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
2795#[inline]
2796#[target_feature(enable = "neon")]
2797#[cfg_attr(test, assert_instr(fcmp))]
2798#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2799pub fn vcltzs_f32(a: f32) -> u32 {
2800    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
2801}
2802#[doc = "Compare less than zero"]
2803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
2804#[inline]
2805#[target_feature(enable = "neon")]
2806#[cfg_attr(test, assert_instr(asr))]
2807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2808pub fn vcltzd_s64(a: i64) -> u64 {
2809    unsafe { transmute(vcltz_s64(transmute(a))) }
2810}
2811#[doc = "Floating-point compare less than zero"]
2812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
2813#[inline]
2814#[cfg_attr(test, assert_instr(fcmp))]
2815#[target_feature(enable = "neon,fp16")]
2816#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2817#[cfg(not(target_arch = "arm64ec"))]
2818pub fn vcltzh_f16(a: f16) -> u16 {
2819    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
2820}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM intrinsic (rotation 0 variant); lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM intrinsic (rotation 0 variant); lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM intrinsic (rotation 0 variant); lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic (rotation 0 variant); lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic (rotation 0 variant); lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Duplicate the selected complex pair (lanes 2*LANE, 2*LANE+1) of `c`
        // across the whole vector, then defer to the plain vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` to 8 lanes by replicating the selected complex pair
        // (lanes 2*LANE, 2*LANE+1), then defer to the plain vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds a single complex pair, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Select the (real, imag) pair at 2*LANE, then defer to the vector form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` holds a single complex pair, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` to 4 lanes by replicating the selected complex pair,
        // then defer to the plain vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 8-lane `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to 4 lanes by replicating the selected complex pair
        // (lanes 2*LANE, 2*LANE+1), then defer to the plain vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Replicate the selected complex pair (lanes 2*LANE, 2*LANE+1)
        // across all 8 lanes, then defer to the plain vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 4-lane `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected complex pair, then defer to the vector form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Replicate the selected complex pair across all 4 lanes,
        // then defer to the plain vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM intrinsic (180-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM intrinsic (180-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM intrinsic (180-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic (180-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic (180-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Replicate the selected complex pair (lanes 2*LANE, 2*LANE+1),
        // then defer to the rot180 vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` to 8 lanes by replicating the selected complex pair,
        // then defer to the rot180 vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds a single complex pair, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Select the (real, imag) pair, then defer to the rot180 vector form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` holds a single complex pair, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` to 4 lanes by replicating the selected complex pair,
        // then defer to the rot180 vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 8-lane `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to 4 lanes by replicating the selected complex pair,
        // then defer to the rot180 vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Replicate the selected complex pair across all 8 lanes,
        // then defer to the rot180 vector form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 4-lane `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected complex pair, then defer to the rot180 form.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Replicate the selected complex pair across all 4 lanes,
        // then defer to the rot180 vector form.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM intrinsic (270-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM intrinsic (270-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM intrinsic (270-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic (270-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic (270-degree rotation variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Replicate the selected complex pair (lanes 2*LANE, 2*LANE+1),
        // then defer to the rot270 vector form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are compile-time constants in range for the 4-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across all 4 pairs of a
        // widened 8-lane vector, then defer to the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A 2-lane f32 vector holds exactly one complex pair, so 0 is the only valid lane.
    static_assert!(LANE == 0);
    // SAFETY: shuffle indices are compile-time constants in range, and the callee
    // requires the same target features enabled here.
    unsafe {
        // Select the (real, imag) pair at index LANE (only pair 0 exists here),
        // then defer to the non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one complex pair, so 0 is the only valid lane.
    static_assert!(LANE == 0);
    // SAFETY: shuffle indices are compile-time constants in range for the 2-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across both pairs of a widened
        // 4-lane vector, then defer to the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex (real, imag) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are compile-time constants in range for the 8-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Narrow to a 4-lane vector holding the complex pair at index LANE in
        // both pair slots, then defer to the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (real, imag) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are compile-time constants in range for an 8-lane
    // vector, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across all 4 pairs of `c`,
        // then defer to the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are compile-time constants in range for the 4-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Narrow to the single complex pair at index LANE, then defer to the
        // non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are compile-time constants in range for a 4-lane
    // vector, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across both pairs of `c`,
        // then defer to the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding for the LLVM intrinsic that lowers to FCMLA with a 90-degree rotation
    // on a 64-bit vector of four f16 lanes (two complex numbers).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: plain value call into the LLVM intrinsic; the required target
    // features are enabled on this function.
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Binding for the LLVM intrinsic that lowers to FCMLA with a 90-degree rotation
    // on a 128-bit vector of eight f16 lanes (four complex numbers).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: plain value call into the LLVM intrinsic; the required target
    // features are enabled on this function.
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding for the LLVM intrinsic that lowers to FCMLA with a 90-degree rotation
    // on a 64-bit vector of two f32 lanes (one complex number).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: plain value call into the LLVM intrinsic; the required `neon,fcma`
    // target features are enabled on this function.
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Binding for the LLVM intrinsic that lowers to FCMLA with a 90-degree rotation
    // on a 128-bit vector of four f32 lanes (two complex numbers).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: plain value call into the LLVM intrinsic; the required `neon,fcma`
    // target features are enabled on this function.
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Binding for the LLVM intrinsic that lowers to FCMLA with a 90-degree rotation
    // on a 128-bit vector of two f64 lanes (one complex number).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: plain value call into the LLVM intrinsic; the required `neon,fcma`
    // target features are enabled on this function.
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are compile-time constants in range for a 4-lane
    // vector, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across both pairs of `c`,
        // then defer to the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are compile-time constants in range for the 4-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across all 4 pairs of a
        // widened 8-lane vector, then defer to the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A 2-lane f32 vector holds exactly one complex pair, so 0 is the only valid lane.
    static_assert!(LANE == 0);
    // SAFETY: shuffle indices are compile-time constants in range, and the callee
    // requires the same target features enabled here.
    unsafe {
        // Select the (real, imag) pair at index LANE (only pair 0 exists here),
        // then defer to the non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one complex pair, so 0 is the only valid lane.
    static_assert!(LANE == 0);
    // SAFETY: shuffle indices are compile-time constants in range for the 2-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across both pairs of a widened
        // 4-lane vector, then defer to the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex (real, imag) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are compile-time constants in range for the 8-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Narrow to a 4-lane vector holding the complex pair at index LANE in
        // both pair slots, then defer to the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (real, imag) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are compile-time constants in range for an 8-lane
    // vector, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across all 4 pairs of `c`,
        // then defer to the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are compile-time constants in range for the 4-lane
    // input, and the callee requires the same target features enabled here.
    unsafe {
        // Narrow to the single complex pair at index LANE, then defer to the
        // non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are compile-time constants in range for a 4-lane
    // vector, and the callee requires the same target features enabled here.
    unsafe {
        // Broadcast the complex pair at index LANE across both pairs of `c`,
        // then defer to the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    // Both vectors have 2 lanes, so each lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b1` makes the
    // match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-1 pick from `a` and 2+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // unreachable: LANE1 & 0b1 is always 0 or 1
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Both vectors have 8 lanes, so each lane index must fit in 3 bits.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b111` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-7 pick from `a` and 8+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // unreachable: LANE1 & 0b111 is always 0..=7
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Both vectors have 4 lanes, so each lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b11` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-3 pick from `a` and 4+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // unreachable: LANE1 & 0b11 is always 0..=3
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Both vectors have 2 lanes, so each lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b1` makes the
    // match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-1 pick from `a` and 2+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // unreachable: LANE1 & 0b1 is always 0 or 1
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Both vectors have 8 lanes, so each lane index must fit in 3 bits.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b111` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-7 pick from `a` and 8+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // unreachable: LANE1 & 0b111 is always 0..=7
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    // Both vectors have 4 lanes, so each lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b11` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-3 pick from `a` and 4+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // unreachable: LANE1 & 0b11 is always 0..=3
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    // Both vectors have 2 lanes, so each lane index must fit in 1 bit.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b1` makes the
    // match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-1 pick from `a` and 2+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // unreachable: LANE1 & 0b1 is always 0 or 1
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Both vectors have 8 lanes, so each lane index must fit in 3 bits.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b111` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-7 pick from `a` and 8+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // unreachable: LANE1 & 0b111 is always 0..=7
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    // Both vectors have 4 lanes, so each lane index must fit in 2 bits.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b11` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // In the mixed shuffle, indices 0-3 pick from `a` and 4+ pick from `b`;
        // each arm keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // unreachable: LANE1 & 0b11 is always 0..=3
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    // `a` has 2 lanes (1-bit index); `b` has 4 lanes (2-bit index).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so the mixed shuffle below has equal-width operands;
    // only lanes 0-1 of the widened value are referenced.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b1` makes the
    // match exhaustive over all reachable values.
    unsafe {
        // Indices 0-3 pick from the widened `a`, 4+ pick from `b`; each arm
        // keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // unreachable: LANE1 & 0b1 is always 0 or 1
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    // `a` has 8 lanes (3-bit index); `b` has 16 lanes (4-bit index).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so the mixed shuffle below has equal-width operands;
    // only lanes 0-7 of the widened value are referenced.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b111` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // Indices 0-15 pick from the widened `a`, 16+ pick from `b`; each arm
        // keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // unreachable: LANE1 & 0b111 is always 0..=7
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    // `a` has 4 lanes (2-bit index); `b` has 8 lanes (3-bit index).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so the mixed shuffle below has equal-width operands;
    // only lanes 0-3 of the widened value are referenced.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // SAFETY: shuffle indices are constant and in range; `LANE1 & 0b11` makes
    // the match exhaustive over all reachable values.
    unsafe {
        // Indices 0-7 pick from the widened `a`, 8+ pick from `b`; each arm
        // keeps `a` intact except lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // unreachable: LANE1 & 0b11 is always 0..=3
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes to match `b`; the upper lanes are never selected.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 4 + LANE2 selects lane LANE2 of `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // LANE1 is masked to 1 bit above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes to match `b`; the upper lanes are never selected.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: each arm copies
        // lane LANE2 of `b` (combined index 16 + LANE2) into lane LANE1.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // LANE1 is masked to 3 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes to match `b`; the upper lanes are never selected.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 8 + LANE2 selects lane LANE2 of `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // LANE1 is masked to 2 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes to match `b`; the upper lanes are never selected.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 4 + LANE2 selects lane LANE2 of `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // LANE1 is masked to 1 bit above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes to match `b`; the upper lanes are never selected.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: each arm copies
        // lane LANE2 of `b` (combined index 16 + LANE2) into lane LANE1.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // LANE1 is masked to 3 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes to match `b`; the upper lanes are never selected.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 8 + LANE2 selects lane LANE2 of `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // LANE1 is masked to 2 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen the 2-lane source `b` to 4 lanes so it matches `a`; the padding
    // lanes are never selected below.
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: each arm copies
        // lane LANE2 of `b` (combined index 4 + LANE2) into lane LANE1 of `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 is masked to 2 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source has a single lane, so LANE2 can only be 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes so it matches `a`; the duplicated
    // lane is never selected below.
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 2 + LANE2 selects the lane of `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source has a single lane, so LANE2 can only be 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes so it matches `a`; the duplicated
    // lane is never selected below.
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 2 + LANE2 selects the lane of `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source has a single lane, so LANE2 can only be 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes so it matches `a`; the duplicated
    // lane is never selected below.
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 2 + LANE2 selects the lane of `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    // The source has a single lane, so LANE2 can only be 0.
    static_assert!(LANE2 == 0);
    // Widen the 1-lane source `b` to 2 lanes so it matches `a`; the duplicated
    // lane is never selected below.
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 2 + LANE2 selects the lane of `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen the 8-lane source `b` to 16 lanes so it matches `a`; the padding
    // lanes are never selected below.
    let b: int8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // `simd_shuffle!` requires compile-time-constant index arrays, so one
        // arm per possible LANE1: each copies lane LANE2 of `b` (combined index
        // 16 + LANE2) into lane LANE1 while keeping the other 15 lanes of `a`.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            // LANE1 is masked to 4 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen the 4-lane source `b` to 8 lanes so it matches `a`; the padding
    // lanes are never selected below.
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: each arm copies
        // lane LANE2 of `b` (combined index 8 + LANE2) into lane LANE1 of `a`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 is masked to 3 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen the 2-lane source `b` to 4 lanes so it matches `a`; the padding
    // lanes are never selected below.
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Shuffle indices must be constants, so branch on LANE1: combined
        // index 4 + LANE2 selects lane LANE2 of `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 is masked to 2 bits above, so every value is covered.
            _ => unreachable_unchecked(),
        }
    }
}
4967#[doc = "Insert vector element from another vector element"]
4968#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
4969#[inline]
4970#[target_feature(enable = "neon")]
4971#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4972#[rustc_legacy_const_generics(1, 3)]
4973#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4974pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
4975    a: uint8x16_t,
4976    b: uint8x8_t,
4977) -> uint8x16_t {
4978    static_assert_uimm_bits!(LANE1, 4);
4979    static_assert_uimm_bits!(LANE2, 3);
4980    let b: uint8x16_t =
4981        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4982    unsafe {
4983        match LANE1 & 0b1111 {
4984            0 => simd_shuffle!(
4985                a,
4986                b,
4987                [
4988                    16 + LANE2 as u32,
4989                    1,
4990                    2,
4991                    3,
4992                    4,
4993                    5,
4994                    6,
4995                    7,
4996                    8,
4997                    9,
4998                    10,
4999                    11,
5000                    12,
5001                    13,
5002                    14,
5003                    15
5004                ]
5005            ),
5006            1 => simd_shuffle!(
5007                a,
5008                b,
5009                [
5010                    0,
5011                    16 + LANE2 as u32,
5012                    2,
5013                    3,
5014                    4,
5015                    5,
5016                    6,
5017                    7,
5018                    8,
5019                    9,
5020                    10,
5021                    11,
5022                    12,
5023                    13,
5024                    14,
5025                    15
5026                ]
5027            ),
5028            2 => simd_shuffle!(
5029                a,
5030                b,
5031                [
5032                    0,
5033                    1,
5034                    16 + LANE2 as u32,
5035                    3,
5036                    4,
5037                    5,
5038                    6,
5039                    7,
5040                    8,
5041                    9,
5042                    10,
5043                    11,
5044                    12,
5045                    13,
5046                    14,
5047                    15
5048                ]
5049            ),
5050            3 => simd_shuffle!(
5051                a,
5052                b,
5053                [
5054                    0,
5055                    1,
5056                    2,
5057                    16 + LANE2 as u32,
5058                    4,
5059                    5,
5060                    6,
5061                    7,
5062                    8,
5063                    9,
5064                    10,
5065                    11,
5066                    12,
5067                    13,
5068                    14,
5069                    15
5070                ]
5071            ),
5072            4 => simd_shuffle!(
5073                a,
5074                b,
5075                [
5076                    0,
5077                    1,
5078                    2,
5079                    3,
5080                    16 + LANE2 as u32,
5081                    5,
5082                    6,
5083                    7,
5084                    8,
5085                    9,
5086                    10,
5087                    11,
5088                    12,
5089                    13,
5090                    14,
5091                    15
5092                ]
5093            ),
5094            5 => simd_shuffle!(
5095                a,
5096                b,
5097                [
5098                    0,
5099                    1,
5100                    2,
5101                    3,
5102                    4,
5103                    16 + LANE2 as u32,
5104                    6,
5105                    7,
5106                    8,
5107                    9,
5108                    10,
5109                    11,
5110                    12,
5111                    13,
5112                    14,
5113                    15
5114                ]
5115            ),
5116            6 => simd_shuffle!(
5117                a,
5118                b,
5119                [
5120                    0,
5121                    1,
5122                    2,
5123                    3,
5124                    4,
5125                    5,
5126                    16 + LANE2 as u32,
5127                    7,
5128                    8,
5129                    9,
5130                    10,
5131                    11,
5132                    12,
5133                    13,
5134                    14,
5135                    15
5136                ]
5137            ),
5138            7 => simd_shuffle!(
5139                a,
5140                b,
5141                [
5142                    0,
5143                    1,
5144                    2,
5145                    3,
5146                    4,
5147                    5,
5148                    6,
5149                    16 + LANE2 as u32,
5150                    8,
5151                    9,
5152                    10,
5153                    11,
5154                    12,
5155                    13,
5156                    14,
5157                    15
5158                ]
5159            ),
5160            8 => simd_shuffle!(
5161                a,
5162                b,
5163                [
5164                    0,
5165                    1,
5166                    2,
5167                    3,
5168                    4,
5169                    5,
5170                    6,
5171                    7,
5172                    16 + LANE2 as u32,
5173                    9,
5174                    10,
5175                    11,
5176                    12,
5177                    13,
5178                    14,
5179                    15
5180                ]
5181            ),
5182            9 => simd_shuffle!(
5183                a,
5184                b,
5185                [
5186                    0,
5187                    1,
5188                    2,
5189                    3,
5190                    4,
5191                    5,
5192                    6,
5193                    7,
5194                    8,
5195                    16 + LANE2 as u32,
5196                    10,
5197                    11,
5198                    12,
5199                    13,
5200                    14,
5201                    15
5202                ]
5203            ),
5204            10 => simd_shuffle!(
5205                a,
5206                b,
5207                [
5208                    0,
5209                    1,
5210                    2,
5211                    3,
5212                    4,
5213                    5,
5214                    6,
5215                    7,
5216                    8,
5217                    9,
5218                    16 + LANE2 as u32,
5219                    11,
5220                    12,
5221                    13,
5222                    14,
5223                    15
5224                ]
5225            ),
5226            11 => simd_shuffle!(
5227                a,
5228                b,
5229                [
5230                    0,
5231                    1,
5232                    2,
5233                    3,
5234                    4,
5235                    5,
5236                    6,
5237                    7,
5238                    8,
5239                    9,
5240                    10,
5241                    16 + LANE2 as u32,
5242                    12,
5243                    13,
5244                    14,
5245                    15
5246                ]
5247            ),
5248            12 => simd_shuffle!(
5249                a,
5250                b,
5251                [
5252                    0,
5253                    1,
5254                    2,
5255                    3,
5256                    4,
5257                    5,
5258                    6,
5259                    7,
5260                    8,
5261                    9,
5262                    10,
5263                    11,
5264                    16 + LANE2 as u32,
5265                    13,
5266                    14,
5267                    15
5268                ]
5269            ),
5270            13 => simd_shuffle!(
5271                a,
5272                b,
5273                [
5274                    0,
5275                    1,
5276                    2,
5277                    3,
5278                    4,
5279                    5,
5280                    6,
5281                    7,
5282                    8,
5283                    9,
5284                    10,
5285                    11,
5286                    12,
5287                    16 + LANE2 as u32,
5288                    14,
5289                    15
5290                ]
5291            ),
5292            14 => simd_shuffle!(
5293                a,
5294                b,
5295                [
5296                    0,
5297                    1,
5298                    2,
5299                    3,
5300                    4,
5301                    5,
5302                    6,
5303                    7,
5304                    8,
5305                    9,
5306                    10,
5307                    11,
5308                    12,
5309                    13,
5310                    16 + LANE2 as u32,
5311                    15
5312                ]
5313            ),
5314            15 => simd_shuffle!(
5315                a,
5316                b,
5317                [
5318                    0,
5319                    1,
5320                    2,
5321                    3,
5322                    4,
5323                    5,
5324                    6,
5325                    7,
5326                    8,
5327                    9,
5328                    10,
5329                    11,
5330                    12,
5331                    13,
5332                    14,
5333                    16 + LANE2 as u32
5334                ]
5335            ),
5336            _ => unreachable_unchecked(),
5337        }
5338    }
5339}
5340#[doc = "Insert vector element from another vector element"]
5341#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
5342#[inline]
5343#[target_feature(enable = "neon")]
5344#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5345#[rustc_legacy_const_generics(1, 3)]
5346#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5347pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
5348    a: uint16x8_t,
5349    b: uint16x4_t,
5350) -> uint16x8_t {
5351    static_assert_uimm_bits!(LANE1, 3);
5352    static_assert_uimm_bits!(LANE2, 2);
5353    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5354    unsafe {
5355        match LANE1 & 0b111 {
5356            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5357            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5358            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5359            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5360            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5361            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5362            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5363            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5364            _ => unreachable_unchecked(),
5365        }
5366    }
5367}
5368#[doc = "Insert vector element from another vector element"]
5369#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
5370#[inline]
5371#[target_feature(enable = "neon")]
5372#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5373#[rustc_legacy_const_generics(1, 3)]
5374#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5375pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
5376    a: uint32x4_t,
5377    b: uint32x2_t,
5378) -> uint32x4_t {
5379    static_assert_uimm_bits!(LANE1, 2);
5380    static_assert_uimm_bits!(LANE2, 1);
5381    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
5382    unsafe {
5383        match LANE1 & 0b11 {
5384            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5385            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5386            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5387            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5388            _ => unreachable_unchecked(),
5389        }
5390    }
5391}
5392#[doc = "Insert vector element from another vector element"]
5393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
5394#[inline]
5395#[target_feature(enable = "neon")]
5396#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5397#[rustc_legacy_const_generics(1, 3)]
5398#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5399pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
5400    a: poly8x16_t,
5401    b: poly8x8_t,
5402) -> poly8x16_t {
5403    static_assert_uimm_bits!(LANE1, 4);
5404    static_assert_uimm_bits!(LANE2, 3);
5405    let b: poly8x16_t =
5406        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5407    unsafe {
5408        match LANE1 & 0b1111 {
5409            0 => simd_shuffle!(
5410                a,
5411                b,
5412                [
5413                    16 + LANE2 as u32,
5414                    1,
5415                    2,
5416                    3,
5417                    4,
5418                    5,
5419                    6,
5420                    7,
5421                    8,
5422                    9,
5423                    10,
5424                    11,
5425                    12,
5426                    13,
5427                    14,
5428                    15
5429                ]
5430            ),
5431            1 => simd_shuffle!(
5432                a,
5433                b,
5434                [
5435                    0,
5436                    16 + LANE2 as u32,
5437                    2,
5438                    3,
5439                    4,
5440                    5,
5441                    6,
5442                    7,
5443                    8,
5444                    9,
5445                    10,
5446                    11,
5447                    12,
5448                    13,
5449                    14,
5450                    15
5451                ]
5452            ),
5453            2 => simd_shuffle!(
5454                a,
5455                b,
5456                [
5457                    0,
5458                    1,
5459                    16 + LANE2 as u32,
5460                    3,
5461                    4,
5462                    5,
5463                    6,
5464                    7,
5465                    8,
5466                    9,
5467                    10,
5468                    11,
5469                    12,
5470                    13,
5471                    14,
5472                    15
5473                ]
5474            ),
5475            3 => simd_shuffle!(
5476                a,
5477                b,
5478                [
5479                    0,
5480                    1,
5481                    2,
5482                    16 + LANE2 as u32,
5483                    4,
5484                    5,
5485                    6,
5486                    7,
5487                    8,
5488                    9,
5489                    10,
5490                    11,
5491                    12,
5492                    13,
5493                    14,
5494                    15
5495                ]
5496            ),
5497            4 => simd_shuffle!(
5498                a,
5499                b,
5500                [
5501                    0,
5502                    1,
5503                    2,
5504                    3,
5505                    16 + LANE2 as u32,
5506                    5,
5507                    6,
5508                    7,
5509                    8,
5510                    9,
5511                    10,
5512                    11,
5513                    12,
5514                    13,
5515                    14,
5516                    15
5517                ]
5518            ),
5519            5 => simd_shuffle!(
5520                a,
5521                b,
5522                [
5523                    0,
5524                    1,
5525                    2,
5526                    3,
5527                    4,
5528                    16 + LANE2 as u32,
5529                    6,
5530                    7,
5531                    8,
5532                    9,
5533                    10,
5534                    11,
5535                    12,
5536                    13,
5537                    14,
5538                    15
5539                ]
5540            ),
5541            6 => simd_shuffle!(
5542                a,
5543                b,
5544                [
5545                    0,
5546                    1,
5547                    2,
5548                    3,
5549                    4,
5550                    5,
5551                    16 + LANE2 as u32,
5552                    7,
5553                    8,
5554                    9,
5555                    10,
5556                    11,
5557                    12,
5558                    13,
5559                    14,
5560                    15
5561                ]
5562            ),
5563            7 => simd_shuffle!(
5564                a,
5565                b,
5566                [
5567                    0,
5568                    1,
5569                    2,
5570                    3,
5571                    4,
5572                    5,
5573                    6,
5574                    16 + LANE2 as u32,
5575                    8,
5576                    9,
5577                    10,
5578                    11,
5579                    12,
5580                    13,
5581                    14,
5582                    15
5583                ]
5584            ),
5585            8 => simd_shuffle!(
5586                a,
5587                b,
5588                [
5589                    0,
5590                    1,
5591                    2,
5592                    3,
5593                    4,
5594                    5,
5595                    6,
5596                    7,
5597                    16 + LANE2 as u32,
5598                    9,
5599                    10,
5600                    11,
5601                    12,
5602                    13,
5603                    14,
5604                    15
5605                ]
5606            ),
5607            9 => simd_shuffle!(
5608                a,
5609                b,
5610                [
5611                    0,
5612                    1,
5613                    2,
5614                    3,
5615                    4,
5616                    5,
5617                    6,
5618                    7,
5619                    8,
5620                    16 + LANE2 as u32,
5621                    10,
5622                    11,
5623                    12,
5624                    13,
5625                    14,
5626                    15
5627                ]
5628            ),
5629            10 => simd_shuffle!(
5630                a,
5631                b,
5632                [
5633                    0,
5634                    1,
5635                    2,
5636                    3,
5637                    4,
5638                    5,
5639                    6,
5640                    7,
5641                    8,
5642                    9,
5643                    16 + LANE2 as u32,
5644                    11,
5645                    12,
5646                    13,
5647                    14,
5648                    15
5649                ]
5650            ),
5651            11 => simd_shuffle!(
5652                a,
5653                b,
5654                [
5655                    0,
5656                    1,
5657                    2,
5658                    3,
5659                    4,
5660                    5,
5661                    6,
5662                    7,
5663                    8,
5664                    9,
5665                    10,
5666                    16 + LANE2 as u32,
5667                    12,
5668                    13,
5669                    14,
5670                    15
5671                ]
5672            ),
5673            12 => simd_shuffle!(
5674                a,
5675                b,
5676                [
5677                    0,
5678                    1,
5679                    2,
5680                    3,
5681                    4,
5682                    5,
5683                    6,
5684                    7,
5685                    8,
5686                    9,
5687                    10,
5688                    11,
5689                    16 + LANE2 as u32,
5690                    13,
5691                    14,
5692                    15
5693                ]
5694            ),
5695            13 => simd_shuffle!(
5696                a,
5697                b,
5698                [
5699                    0,
5700                    1,
5701                    2,
5702                    3,
5703                    4,
5704                    5,
5705                    6,
5706                    7,
5707                    8,
5708                    9,
5709                    10,
5710                    11,
5711                    12,
5712                    16 + LANE2 as u32,
5713                    14,
5714                    15
5715                ]
5716            ),
5717            14 => simd_shuffle!(
5718                a,
5719                b,
5720                [
5721                    0,
5722                    1,
5723                    2,
5724                    3,
5725                    4,
5726                    5,
5727                    6,
5728                    7,
5729                    8,
5730                    9,
5731                    10,
5732                    11,
5733                    12,
5734                    13,
5735                    16 + LANE2 as u32,
5736                    15
5737                ]
5738            ),
5739            15 => simd_shuffle!(
5740                a,
5741                b,
5742                [
5743                    0,
5744                    1,
5745                    2,
5746                    3,
5747                    4,
5748                    5,
5749                    6,
5750                    7,
5751                    8,
5752                    9,
5753                    10,
5754                    11,
5755                    12,
5756                    13,
5757                    14,
5758                    16 + LANE2 as u32
5759                ]
5760            ),
5761            _ => unreachable_unchecked(),
5762        }
5763    }
5764}
5765#[doc = "Insert vector element from another vector element"]
5766#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
5767#[inline]
5768#[target_feature(enable = "neon")]
5769#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5770#[rustc_legacy_const_generics(1, 3)]
5771#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5772pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
5773    a: poly16x8_t,
5774    b: poly16x4_t,
5775) -> poly16x8_t {
5776    static_assert_uimm_bits!(LANE1, 3);
5777    static_assert_uimm_bits!(LANE2, 2);
5778    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5779    unsafe {
5780        match LANE1 & 0b111 {
5781            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5782            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5783            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5784            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5785            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5786            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5787            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5788            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5789            _ => unreachable_unchecked(),
5790        }
5791    }
5792}
5793#[doc = "Insert vector element from another vector element"]
5794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
5795#[inline]
5796#[target_feature(enable = "neon")]
5797#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5798#[rustc_legacy_const_generics(1, 3)]
5799#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5800pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
5801    a: float32x4_t,
5802    b: float32x4_t,
5803) -> float32x4_t {
5804    static_assert_uimm_bits!(LANE1, 2);
5805    static_assert_uimm_bits!(LANE2, 2);
5806    unsafe {
5807        match LANE1 & 0b11 {
5808            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5809            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5810            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5811            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5812            _ => unreachable_unchecked(),
5813        }
5814    }
5815}
5816#[doc = "Insert vector element from another vector element"]
5817#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
5818#[inline]
5819#[target_feature(enable = "neon")]
5820#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5821#[rustc_legacy_const_generics(1, 3)]
5822#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5823pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
5824    a: float64x2_t,
5825    b: float64x2_t,
5826) -> float64x2_t {
5827    static_assert_uimm_bits!(LANE1, 1);
5828    static_assert_uimm_bits!(LANE2, 1);
5829    unsafe {
5830        match LANE1 & 0b1 {
5831            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
5832            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
5833            _ => unreachable_unchecked(),
5834        }
5835    }
5836}
5837#[doc = "Insert vector element from another vector element"]
5838#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
5839#[inline]
5840#[target_feature(enable = "neon")]
5841#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5842#[rustc_legacy_const_generics(1, 3)]
5843#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5844pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
5845    a: int8x16_t,
5846    b: int8x16_t,
5847) -> int8x16_t {
5848    static_assert_uimm_bits!(LANE1, 4);
5849    static_assert_uimm_bits!(LANE2, 4);
5850    unsafe {
5851        match LANE1 & 0b1111 {
5852            0 => simd_shuffle!(
5853                a,
5854                b,
5855                [
5856                    16 + LANE2 as u32,
5857                    1,
5858                    2,
5859                    3,
5860                    4,
5861                    5,
5862                    6,
5863                    7,
5864                    8,
5865                    9,
5866                    10,
5867                    11,
5868                    12,
5869                    13,
5870                    14,
5871                    15
5872                ]
5873            ),
5874            1 => simd_shuffle!(
5875                a,
5876                b,
5877                [
5878                    0,
5879                    16 + LANE2 as u32,
5880                    2,
5881                    3,
5882                    4,
5883                    5,
5884                    6,
5885                    7,
5886                    8,
5887                    9,
5888                    10,
5889                    11,
5890                    12,
5891                    13,
5892                    14,
5893                    15
5894                ]
5895            ),
5896            2 => simd_shuffle!(
5897                a,
5898                b,
5899                [
5900                    0,
5901                    1,
5902                    16 + LANE2 as u32,
5903                    3,
5904                    4,
5905                    5,
5906                    6,
5907                    7,
5908                    8,
5909                    9,
5910                    10,
5911                    11,
5912                    12,
5913                    13,
5914                    14,
5915                    15
5916                ]
5917            ),
5918            3 => simd_shuffle!(
5919                a,
5920                b,
5921                [
5922                    0,
5923                    1,
5924                    2,
5925                    16 + LANE2 as u32,
5926                    4,
5927                    5,
5928                    6,
5929                    7,
5930                    8,
5931                    9,
5932                    10,
5933                    11,
5934                    12,
5935                    13,
5936                    14,
5937                    15
5938                ]
5939            ),
5940            4 => simd_shuffle!(
5941                a,
5942                b,
5943                [
5944                    0,
5945                    1,
5946                    2,
5947                    3,
5948                    16 + LANE2 as u32,
5949                    5,
5950                    6,
5951                    7,
5952                    8,
5953                    9,
5954                    10,
5955                    11,
5956                    12,
5957                    13,
5958                    14,
5959                    15
5960                ]
5961            ),
5962            5 => simd_shuffle!(
5963                a,
5964                b,
5965                [
5966                    0,
5967                    1,
5968                    2,
5969                    3,
5970                    4,
5971                    16 + LANE2 as u32,
5972                    6,
5973                    7,
5974                    8,
5975                    9,
5976                    10,
5977                    11,
5978                    12,
5979                    13,
5980                    14,
5981                    15
5982                ]
5983            ),
5984            6 => simd_shuffle!(
5985                a,
5986                b,
5987                [
5988                    0,
5989                    1,
5990                    2,
5991                    3,
5992                    4,
5993                    5,
5994                    16 + LANE2 as u32,
5995                    7,
5996                    8,
5997                    9,
5998                    10,
5999                    11,
6000                    12,
6001                    13,
6002                    14,
6003                    15
6004                ]
6005            ),
6006            7 => simd_shuffle!(
6007                a,
6008                b,
6009                [
6010                    0,
6011                    1,
6012                    2,
6013                    3,
6014                    4,
6015                    5,
6016                    6,
6017                    16 + LANE2 as u32,
6018                    8,
6019                    9,
6020                    10,
6021                    11,
6022                    12,
6023                    13,
6024                    14,
6025                    15
6026                ]
6027            ),
6028            8 => simd_shuffle!(
6029                a,
6030                b,
6031                [
6032                    0,
6033                    1,
6034                    2,
6035                    3,
6036                    4,
6037                    5,
6038                    6,
6039                    7,
6040                    16 + LANE2 as u32,
6041                    9,
6042                    10,
6043                    11,
6044                    12,
6045                    13,
6046                    14,
6047                    15
6048                ]
6049            ),
6050            9 => simd_shuffle!(
6051                a,
6052                b,
6053                [
6054                    0,
6055                    1,
6056                    2,
6057                    3,
6058                    4,
6059                    5,
6060                    6,
6061                    7,
6062                    8,
6063                    16 + LANE2 as u32,
6064                    10,
6065                    11,
6066                    12,
6067                    13,
6068                    14,
6069                    15
6070                ]
6071            ),
6072            10 => simd_shuffle!(
6073                a,
6074                b,
6075                [
6076                    0,
6077                    1,
6078                    2,
6079                    3,
6080                    4,
6081                    5,
6082                    6,
6083                    7,
6084                    8,
6085                    9,
6086                    16 + LANE2 as u32,
6087                    11,
6088                    12,
6089                    13,
6090                    14,
6091                    15
6092                ]
6093            ),
6094            11 => simd_shuffle!(
6095                a,
6096                b,
6097                [
6098                    0,
6099                    1,
6100                    2,
6101                    3,
6102                    4,
6103                    5,
6104                    6,
6105                    7,
6106                    8,
6107                    9,
6108                    10,
6109                    16 + LANE2 as u32,
6110                    12,
6111                    13,
6112                    14,
6113                    15
6114                ]
6115            ),
6116            12 => simd_shuffle!(
6117                a,
6118                b,
6119                [
6120                    0,
6121                    1,
6122                    2,
6123                    3,
6124                    4,
6125                    5,
6126                    6,
6127                    7,
6128                    8,
6129                    9,
6130                    10,
6131                    11,
6132                    16 + LANE2 as u32,
6133                    13,
6134                    14,
6135                    15
6136                ]
6137            ),
6138            13 => simd_shuffle!(
6139                a,
6140                b,
6141                [
6142                    0,
6143                    1,
6144                    2,
6145                    3,
6146                    4,
6147                    5,
6148                    6,
6149                    7,
6150                    8,
6151                    9,
6152                    10,
6153                    11,
6154                    12,
6155                    16 + LANE2 as u32,
6156                    14,
6157                    15
6158                ]
6159            ),
6160            14 => simd_shuffle!(
6161                a,
6162                b,
6163                [
6164                    0,
6165                    1,
6166                    2,
6167                    3,
6168                    4,
6169                    5,
6170                    6,
6171                    7,
6172                    8,
6173                    9,
6174                    10,
6175                    11,
6176                    12,
6177                    13,
6178                    16 + LANE2 as u32,
6179                    15
6180                ]
6181            ),
6182            15 => simd_shuffle!(
6183                a,
6184                b,
6185                [
6186                    0,
6187                    1,
6188                    2,
6189                    3,
6190                    4,
6191                    5,
6192                    6,
6193                    7,
6194                    8,
6195                    9,
6196                    10,
6197                    11,
6198                    12,
6199                    13,
6200                    14,
6201                    16 + LANE2 as u32
6202                ]
6203            ),
6204            _ => unreachable_unchecked(),
6205        }
6206    }
6207}
6208#[doc = "Insert vector element from another vector element"]
6209#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
6210#[inline]
6211#[target_feature(enable = "neon")]
6212#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6213#[rustc_legacy_const_generics(1, 3)]
6214#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6215pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
6216    a: int16x8_t,
6217    b: int16x8_t,
6218) -> int16x8_t {
6219    static_assert_uimm_bits!(LANE1, 3);
6220    static_assert_uimm_bits!(LANE2, 3);
6221    unsafe {
6222        match LANE1 & 0b111 {
6223            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6224            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6225            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6226            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6227            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6228            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6229            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6230            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6231            _ => unreachable_unchecked(),
6232        }
6233    }
6234}
6235#[doc = "Insert vector element from another vector element"]
6236#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
6237#[inline]
6238#[target_feature(enable = "neon")]
6239#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6240#[rustc_legacy_const_generics(1, 3)]
6241#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6242pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
6243    a: int32x4_t,
6244    b: int32x4_t,
6245) -> int32x4_t {
6246    static_assert_uimm_bits!(LANE1, 2);
6247    static_assert_uimm_bits!(LANE2, 2);
6248    unsafe {
6249        match LANE1 & 0b11 {
6250            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
6251            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
6252            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
6253            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
6254            _ => unreachable_unchecked(),
6255        }
6256    }
6257}
6258#[doc = "Insert vector element from another vector element"]
6259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
6260#[inline]
6261#[target_feature(enable = "neon")]
6262#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6263#[rustc_legacy_const_generics(1, 3)]
6264#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6265pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
6266    a: int64x2_t,
6267    b: int64x2_t,
6268) -> int64x2_t {
6269    static_assert_uimm_bits!(LANE1, 1);
6270    static_assert_uimm_bits!(LANE2, 1);
6271    unsafe {
6272        match LANE1 & 0b1 {
6273            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
6274            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
6275            _ => unreachable_unchecked(),
6276        }
6277    }
6278}
6279#[doc = "Insert vector element from another vector element"]
6280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6281#[inline]
6282#[target_feature(enable = "neon")]
6283#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6284#[rustc_legacy_const_generics(1, 3)]
6285#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6286pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6287    a: uint8x16_t,
6288    b: uint8x16_t,
6289) -> uint8x16_t {
6290    static_assert_uimm_bits!(LANE1, 4);
6291    static_assert_uimm_bits!(LANE2, 4);
6292    unsafe {
6293        match LANE1 & 0b1111 {
6294            0 => simd_shuffle!(
6295                a,
6296                b,
6297                [
6298                    16 + LANE2 as u32,
6299                    1,
6300                    2,
6301                    3,
6302                    4,
6303                    5,
6304                    6,
6305                    7,
6306                    8,
6307                    9,
6308                    10,
6309                    11,
6310                    12,
6311                    13,
6312                    14,
6313                    15
6314                ]
6315            ),
6316            1 => simd_shuffle!(
6317                a,
6318                b,
6319                [
6320                    0,
6321                    16 + LANE2 as u32,
6322                    2,
6323                    3,
6324                    4,
6325                    5,
6326                    6,
6327                    7,
6328                    8,
6329                    9,
6330                    10,
6331                    11,
6332                    12,
6333                    13,
6334                    14,
6335                    15
6336                ]
6337            ),
6338            2 => simd_shuffle!(
6339                a,
6340                b,
6341                [
6342                    0,
6343                    1,
6344                    16 + LANE2 as u32,
6345                    3,
6346                    4,
6347                    5,
6348                    6,
6349                    7,
6350                    8,
6351                    9,
6352                    10,
6353                    11,
6354                    12,
6355                    13,
6356                    14,
6357                    15
6358                ]
6359            ),
6360            3 => simd_shuffle!(
6361                a,
6362                b,
6363                [
6364                    0,
6365                    1,
6366                    2,
6367                    16 + LANE2 as u32,
6368                    4,
6369                    5,
6370                    6,
6371                    7,
6372                    8,
6373                    9,
6374                    10,
6375                    11,
6376                    12,
6377                    13,
6378                    14,
6379                    15
6380                ]
6381            ),
6382            4 => simd_shuffle!(
6383                a,
6384                b,
6385                [
6386                    0,
6387                    1,
6388                    2,
6389                    3,
6390                    16 + LANE2 as u32,
6391                    5,
6392                    6,
6393                    7,
6394                    8,
6395                    9,
6396                    10,
6397                    11,
6398                    12,
6399                    13,
6400                    14,
6401                    15
6402                ]
6403            ),
6404            5 => simd_shuffle!(
6405                a,
6406                b,
6407                [
6408                    0,
6409                    1,
6410                    2,
6411                    3,
6412                    4,
6413                    16 + LANE2 as u32,
6414                    6,
6415                    7,
6416                    8,
6417                    9,
6418                    10,
6419                    11,
6420                    12,
6421                    13,
6422                    14,
6423                    15
6424                ]
6425            ),
6426            6 => simd_shuffle!(
6427                a,
6428                b,
6429                [
6430                    0,
6431                    1,
6432                    2,
6433                    3,
6434                    4,
6435                    5,
6436                    16 + LANE2 as u32,
6437                    7,
6438                    8,
6439                    9,
6440                    10,
6441                    11,
6442                    12,
6443                    13,
6444                    14,
6445                    15
6446                ]
6447            ),
6448            7 => simd_shuffle!(
6449                a,
6450                b,
6451                [
6452                    0,
6453                    1,
6454                    2,
6455                    3,
6456                    4,
6457                    5,
6458                    6,
6459                    16 + LANE2 as u32,
6460                    8,
6461                    9,
6462                    10,
6463                    11,
6464                    12,
6465                    13,
6466                    14,
6467                    15
6468                ]
6469            ),
6470            8 => simd_shuffle!(
6471                a,
6472                b,
6473                [
6474                    0,
6475                    1,
6476                    2,
6477                    3,
6478                    4,
6479                    5,
6480                    6,
6481                    7,
6482                    16 + LANE2 as u32,
6483                    9,
6484                    10,
6485                    11,
6486                    12,
6487                    13,
6488                    14,
6489                    15
6490                ]
6491            ),
6492            9 => simd_shuffle!(
6493                a,
6494                b,
6495                [
6496                    0,
6497                    1,
6498                    2,
6499                    3,
6500                    4,
6501                    5,
6502                    6,
6503                    7,
6504                    8,
6505                    16 + LANE2 as u32,
6506                    10,
6507                    11,
6508                    12,
6509                    13,
6510                    14,
6511                    15
6512                ]
6513            ),
6514            10 => simd_shuffle!(
6515                a,
6516                b,
6517                [
6518                    0,
6519                    1,
6520                    2,
6521                    3,
6522                    4,
6523                    5,
6524                    6,
6525                    7,
6526                    8,
6527                    9,
6528                    16 + LANE2 as u32,
6529                    11,
6530                    12,
6531                    13,
6532                    14,
6533                    15
6534                ]
6535            ),
6536            11 => simd_shuffle!(
6537                a,
6538                b,
6539                [
6540                    0,
6541                    1,
6542                    2,
6543                    3,
6544                    4,
6545                    5,
6546                    6,
6547                    7,
6548                    8,
6549                    9,
6550                    10,
6551                    16 + LANE2 as u32,
6552                    12,
6553                    13,
6554                    14,
6555                    15
6556                ]
6557            ),
6558            12 => simd_shuffle!(
6559                a,
6560                b,
6561                [
6562                    0,
6563                    1,
6564                    2,
6565                    3,
6566                    4,
6567                    5,
6568                    6,
6569                    7,
6570                    8,
6571                    9,
6572                    10,
6573                    11,
6574                    16 + LANE2 as u32,
6575                    13,
6576                    14,
6577                    15
6578                ]
6579            ),
6580            13 => simd_shuffle!(
6581                a,
6582                b,
6583                [
6584                    0,
6585                    1,
6586                    2,
6587                    3,
6588                    4,
6589                    5,
6590                    6,
6591                    7,
6592                    8,
6593                    9,
6594                    10,
6595                    11,
6596                    12,
6597                    16 + LANE2 as u32,
6598                    14,
6599                    15
6600                ]
6601            ),
6602            14 => simd_shuffle!(
6603                a,
6604                b,
6605                [
6606                    0,
6607                    1,
6608                    2,
6609                    3,
6610                    4,
6611                    5,
6612                    6,
6613                    7,
6614                    8,
6615                    9,
6616                    10,
6617                    11,
6618                    12,
6619                    13,
6620                    16 + LANE2 as u32,
6621                    15
6622                ]
6623            ),
6624            15 => simd_shuffle!(
6625                a,
6626                b,
6627                [
6628                    0,
6629                    1,
6630                    2,
6631                    3,
6632                    4,
6633                    5,
6634                    6,
6635                    7,
6636                    8,
6637                    9,
6638                    10,
6639                    11,
6640                    12,
6641                    13,
6642                    14,
6643                    16 + LANE2 as u32
6644                ]
6645            ),
6646            _ => unreachable_unchecked(),
6647        }
6648    }
6649}
6650#[doc = "Insert vector element from another vector element"]
6651#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
6652#[inline]
6653#[target_feature(enable = "neon")]
6654#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6655#[rustc_legacy_const_generics(1, 3)]
6656#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6657pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
6658    a: uint16x8_t,
6659    b: uint16x8_t,
6660) -> uint16x8_t {
6661    static_assert_uimm_bits!(LANE1, 3);
6662    static_assert_uimm_bits!(LANE2, 3);
6663    unsafe {
6664        match LANE1 & 0b111 {
6665            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6666            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6667            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6668            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6669            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6670            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6671            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6672            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6673            _ => unreachable_unchecked(),
6674        }
6675    }
6676}
6677#[doc = "Insert vector element from another vector element"]
6678#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
6679#[inline]
6680#[target_feature(enable = "neon")]
6681#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6682#[rustc_legacy_const_generics(1, 3)]
6683#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6684pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
6685    a: uint32x4_t,
6686    b: uint32x4_t,
6687) -> uint32x4_t {
6688    static_assert_uimm_bits!(LANE1, 2);
6689    static_assert_uimm_bits!(LANE2, 2);
6690    unsafe {
6691        match LANE1 & 0b11 {
6692            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
6693            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
6694            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
6695            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
6696            _ => unreachable_unchecked(),
6697        }
6698    }
6699}
6700#[doc = "Insert vector element from another vector element"]
6701#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
6702#[inline]
6703#[target_feature(enable = "neon")]
6704#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6705#[rustc_legacy_const_generics(1, 3)]
6706#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6707pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
6708    a: uint64x2_t,
6709    b: uint64x2_t,
6710) -> uint64x2_t {
6711    static_assert_uimm_bits!(LANE1, 1);
6712    static_assert_uimm_bits!(LANE2, 1);
6713    unsafe {
6714        match LANE1 & 0b1 {
6715            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
6716            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
6717            _ => unreachable_unchecked(),
6718        }
6719    }
6720}
6721#[doc = "Insert vector element from another vector element"]
6722#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6723#[inline]
6724#[target_feature(enable = "neon")]
6725#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6726#[rustc_legacy_const_generics(1, 3)]
6727#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6728pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6729    a: poly8x16_t,
6730    b: poly8x16_t,
6731) -> poly8x16_t {
6732    static_assert_uimm_bits!(LANE1, 4);
6733    static_assert_uimm_bits!(LANE2, 4);
6734    unsafe {
6735        match LANE1 & 0b1111 {
6736            0 => simd_shuffle!(
6737                a,
6738                b,
6739                [
6740                    16 + LANE2 as u32,
6741                    1,
6742                    2,
6743                    3,
6744                    4,
6745                    5,
6746                    6,
6747                    7,
6748                    8,
6749                    9,
6750                    10,
6751                    11,
6752                    12,
6753                    13,
6754                    14,
6755                    15
6756                ]
6757            ),
6758            1 => simd_shuffle!(
6759                a,
6760                b,
6761                [
6762                    0,
6763                    16 + LANE2 as u32,
6764                    2,
6765                    3,
6766                    4,
6767                    5,
6768                    6,
6769                    7,
6770                    8,
6771                    9,
6772                    10,
6773                    11,
6774                    12,
6775                    13,
6776                    14,
6777                    15
6778                ]
6779            ),
6780            2 => simd_shuffle!(
6781                a,
6782                b,
6783                [
6784                    0,
6785                    1,
6786                    16 + LANE2 as u32,
6787                    3,
6788                    4,
6789                    5,
6790                    6,
6791                    7,
6792                    8,
6793                    9,
6794                    10,
6795                    11,
6796                    12,
6797                    13,
6798                    14,
6799                    15
6800                ]
6801            ),
6802            3 => simd_shuffle!(
6803                a,
6804                b,
6805                [
6806                    0,
6807                    1,
6808                    2,
6809                    16 + LANE2 as u32,
6810                    4,
6811                    5,
6812                    6,
6813                    7,
6814                    8,
6815                    9,
6816                    10,
6817                    11,
6818                    12,
6819                    13,
6820                    14,
6821                    15
6822                ]
6823            ),
6824            4 => simd_shuffle!(
6825                a,
6826                b,
6827                [
6828                    0,
6829                    1,
6830                    2,
6831                    3,
6832                    16 + LANE2 as u32,
6833                    5,
6834                    6,
6835                    7,
6836                    8,
6837                    9,
6838                    10,
6839                    11,
6840                    12,
6841                    13,
6842                    14,
6843                    15
6844                ]
6845            ),
6846            5 => simd_shuffle!(
6847                a,
6848                b,
6849                [
6850                    0,
6851                    1,
6852                    2,
6853                    3,
6854                    4,
6855                    16 + LANE2 as u32,
6856                    6,
6857                    7,
6858                    8,
6859                    9,
6860                    10,
6861                    11,
6862                    12,
6863                    13,
6864                    14,
6865                    15
6866                ]
6867            ),
6868            6 => simd_shuffle!(
6869                a,
6870                b,
6871                [
6872                    0,
6873                    1,
6874                    2,
6875                    3,
6876                    4,
6877                    5,
6878                    16 + LANE2 as u32,
6879                    7,
6880                    8,
6881                    9,
6882                    10,
6883                    11,
6884                    12,
6885                    13,
6886                    14,
6887                    15
6888                ]
6889            ),
6890            7 => simd_shuffle!(
6891                a,
6892                b,
6893                [
6894                    0,
6895                    1,
6896                    2,
6897                    3,
6898                    4,
6899                    5,
6900                    6,
6901                    16 + LANE2 as u32,
6902                    8,
6903                    9,
6904                    10,
6905                    11,
6906                    12,
6907                    13,
6908                    14,
6909                    15
6910                ]
6911            ),
6912            8 => simd_shuffle!(
6913                a,
6914                b,
6915                [
6916                    0,
6917                    1,
6918                    2,
6919                    3,
6920                    4,
6921                    5,
6922                    6,
6923                    7,
6924                    16 + LANE2 as u32,
6925                    9,
6926                    10,
6927                    11,
6928                    12,
6929                    13,
6930                    14,
6931                    15
6932                ]
6933            ),
6934            9 => simd_shuffle!(
6935                a,
6936                b,
6937                [
6938                    0,
6939                    1,
6940                    2,
6941                    3,
6942                    4,
6943                    5,
6944                    6,
6945                    7,
6946                    8,
6947                    16 + LANE2 as u32,
6948                    10,
6949                    11,
6950                    12,
6951                    13,
6952                    14,
6953                    15
6954                ]
6955            ),
6956            10 => simd_shuffle!(
6957                a,
6958                b,
6959                [
6960                    0,
6961                    1,
6962                    2,
6963                    3,
6964                    4,
6965                    5,
6966                    6,
6967                    7,
6968                    8,
6969                    9,
6970                    16 + LANE2 as u32,
6971                    11,
6972                    12,
6973                    13,
6974                    14,
6975                    15
6976                ]
6977            ),
6978            11 => simd_shuffle!(
6979                a,
6980                b,
6981                [
6982                    0,
6983                    1,
6984                    2,
6985                    3,
6986                    4,
6987                    5,
6988                    6,
6989                    7,
6990                    8,
6991                    9,
6992                    10,
6993                    16 + LANE2 as u32,
6994                    12,
6995                    13,
6996                    14,
6997                    15
6998                ]
6999            ),
7000            12 => simd_shuffle!(
7001                a,
7002                b,
7003                [
7004                    0,
7005                    1,
7006                    2,
7007                    3,
7008                    4,
7009                    5,
7010                    6,
7011                    7,
7012                    8,
7013                    9,
7014                    10,
7015                    11,
7016                    16 + LANE2 as u32,
7017                    13,
7018                    14,
7019                    15
7020                ]
7021            ),
7022            13 => simd_shuffle!(
7023                a,
7024                b,
7025                [
7026                    0,
7027                    1,
7028                    2,
7029                    3,
7030                    4,
7031                    5,
7032                    6,
7033                    7,
7034                    8,
7035                    9,
7036                    10,
7037                    11,
7038                    12,
7039                    16 + LANE2 as u32,
7040                    14,
7041                    15
7042                ]
7043            ),
7044            14 => simd_shuffle!(
7045                a,
7046                b,
7047                [
7048                    0,
7049                    1,
7050                    2,
7051                    3,
7052                    4,
7053                    5,
7054                    6,
7055                    7,
7056                    8,
7057                    9,
7058                    10,
7059                    11,
7060                    12,
7061                    13,
7062                    16 + LANE2 as u32,
7063                    15
7064                ]
7065            ),
7066            15 => simd_shuffle!(
7067                a,
7068                b,
7069                [
7070                    0,
7071                    1,
7072                    2,
7073                    3,
7074                    4,
7075                    5,
7076                    6,
7077                    7,
7078                    8,
7079                    9,
7080                    10,
7081                    11,
7082                    12,
7083                    13,
7084                    14,
7085                    16 + LANE2 as u32
7086                ]
7087            ),
7088            _ => unreachable_unchecked(),
7089        }
7090    }
7091}
7092#[doc = "Insert vector element from another vector element"]
7093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
7094#[inline]
7095#[target_feature(enable = "neon")]
7096#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
7097#[rustc_legacy_const_generics(1, 3)]
7098#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7099pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
7100    a: poly16x8_t,
7101    b: poly16x8_t,
7102) -> poly16x8_t {
7103    static_assert_uimm_bits!(LANE1, 3);
7104    static_assert_uimm_bits!(LANE2, 3);
7105    unsafe {
7106        match LANE1 & 0b111 {
7107            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
7108            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
7109            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
7110            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
7111            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
7112            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
7113            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
7114            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
7115            _ => unreachable_unchecked(),
7116        }
7117    }
7118}
7119#[doc = "Insert vector element from another vector element"]
7120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
7121#[inline]
7122#[target_feature(enable = "neon")]
7123#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
7124#[rustc_legacy_const_generics(1, 3)]
7125#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7126pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
7127    a: poly64x2_t,
7128    b: poly64x2_t,
7129) -> poly64x2_t {
7130    static_assert_uimm_bits!(LANE1, 1);
7131    static_assert_uimm_bits!(LANE2, 1);
7132    unsafe {
7133        match LANE1 & 0b1 {
7134            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
7135            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
7136            _ => unreachable_unchecked(),
7137        }
7138    }
7139}
7140#[doc = "Insert vector element from another vector element"]
7141#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
7142#[inline]
7143#[target_feature(enable = "neon")]
7144#[cfg_attr(test, assert_instr(nop))]
7145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7146pub fn vcreate_f64(a: u64) -> float64x1_t {
7147    unsafe { transmute(a) }
7148}
7149#[doc = "Floating-point convert"]
7150#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
7151#[inline]
7152#[target_feature(enable = "neon")]
7153#[cfg_attr(test, assert_instr(fcvtn))]
7154#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7155pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
7156    unsafe { simd_cast(a) }
7157}
7158#[doc = "Floating-point convert to higher precision long"]
7159#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
7160#[inline]
7161#[target_feature(enable = "neon")]
7162#[cfg_attr(test, assert_instr(fcvtl))]
7163#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7164pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
7165    unsafe { simd_cast(a) }
7166}
7167#[doc = "Fixed-point convert to floating-point"]
7168#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
7169#[inline]
7170#[target_feature(enable = "neon")]
7171#[cfg_attr(test, assert_instr(scvtf))]
7172#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7173pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
7174    unsafe { simd_cast(a) }
7175}
7176#[doc = "Fixed-point convert to floating-point"]
7177#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
7178#[inline]
7179#[target_feature(enable = "neon")]
7180#[cfg_attr(test, assert_instr(scvtf))]
7181#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7182pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
7183    unsafe { simd_cast(a) }
7184}
7185#[doc = "Fixed-point convert to floating-point"]
7186#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
7187#[inline]
7188#[target_feature(enable = "neon")]
7189#[cfg_attr(test, assert_instr(ucvtf))]
7190#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7191pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
7192    unsafe { simd_cast(a) }
7193}
7194#[doc = "Fixed-point convert to floating-point"]
7195#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
7196#[inline]
7197#[target_feature(enable = "neon")]
7198#[cfg_attr(test, assert_instr(ucvtf))]
7199#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7200pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
7201    unsafe { simd_cast(a) }
7202}
7203#[doc = "Floating-point convert to lower precision"]
7204#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
7205#[inline]
7206#[cfg_attr(test, assert_instr(fcvtn2))]
7207#[target_feature(enable = "neon,fp16")]
7208#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7209#[cfg(not(target_arch = "arm64ec"))]
7210pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
7211    vcombine_f16(a, vcvt_f16_f32(b))
7212}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    // Widen the upper four f16 lanes of `a` to f32 (FCVTL2 semantics,
    // per assert_instr).
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // SAFETY: `simd_cast` narrows f64x2 -> f32x2 lane-wise; the shuffle then
    // concatenates `a` (result lanes 0-1) with the narrowed `b` (lanes 2-3).
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: the self-shuffle extracts the upper two lanes of `a`, and
    // `simd_cast` widens them lane-wise from f32 to f64 (FCVTL2 semantics).
    unsafe {
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N is the number of fractional bits in the fixed-point input (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // N is the number of fractional bits in the fixed-point input (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // N is the number of fractional bits in the fixed-point input (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // N is the number of fractional bits in the fixed-point input (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N is the number of fractional bits in the fixed-point result (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // N is the number of fractional bits in the fixed-point result (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // N is the number of fractional bits in the fixed-point result (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // N is the number of fractional bits in the fixed-point result (1..=64).
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    // SAFETY: pure value conversion with no memory access; N was
    // range-checked at compile time above.
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    // Bound to LLVM's saturating float->int conversion, which matches
    // FCVTZS (round toward zero, saturate at the i64 range, NaN -> 0).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Bound to LLVM's saturating float->int conversion, which matches
    // FCVTZS (round toward zero, saturate at the i64 range, NaN -> 0).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Bound to LLVM's saturating float->uint conversion, which matches
    // FCVTZU (round toward zero, saturate at the u64 range, NaN -> 0).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Bound to LLVM's saturating float->uint conversion, which matches
    // FCVTZU (round toward zero, saturate at the u64 range, NaN -> 0).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    // FFI declaration of the FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    // FFI declaration of the FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    // FFI declaration of the FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    // FFI declaration of the FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    // FFI declaration of the FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    // FFI declaration of the FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    // FFI declaration of the FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // FFI declaration of the FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    // FFI declaration of the FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // FFI declaration of the FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    // FFI declaration of the FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // FFI declaration of the FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Delegates to the i32 conversion, then truncates to i16.
    // NOTE(review): `as i16` wraps rather than saturates; an f16 input above
    // i16::MAX (f16 represents finite values up to 65504) would wrap here —
    // confirm this matches the intended FCVTAS scalar semantics.
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    // FFI declaration of the scalar FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    // FFI declaration of the scalar FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Delegates to the u32 conversion, then truncates to u16. Every finite
    // f16 (max 65504) fits in u16, so the cast cannot wrap for finite inputs.
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    // FFI declaration of the scalar FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    // FFI declaration of the scalar FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    // FFI declaration of the scalar FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    // FFI declaration of the scalar FCVTAS LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    // FFI declaration of the scalar FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    // FFI declaration of the scalar FCVTAU LLVM intrinsic (round to nearest,
    // ties away from zero), resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    // SAFETY: pure value conversion with no memory access or preconditions.
    unsafe { _vcvtad_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // Plain numeric cast; lowers to the SCVTF instruction asserted above.
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // Plain numeric cast; lowers to the SCVTF instruction asserted above.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // Plain numeric cast; lowers to the SCVTF instruction asserted above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // Plain numeric cast; lowers to the SCVTF instruction asserted above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // Plain numeric cast; lowers to the SCVTF instruction asserted above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // Plain numeric cast; lowers to the UCVTF instruction asserted above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // Plain numeric cast; lowers to the UCVTF instruction asserted above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // Plain numeric cast; lowers to the UCVTF instruction asserted above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the number of fractional bits in the fixed-point input (1..=16).
    static_assert!(N >= 1 && N <= 16);
    // Sign-extend to i32 and reuse the 32-bit variant; widening preserves
    // both the value and the fractional-bit interpretation.
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    // N is the number of fractional bits in the fixed-point input (1..=16).
    static_assert!(N >= 1 && N <= 16);
    // FFI declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    // SAFETY: pure value conversion; N was range-checked at compile time.
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    // Zero-extend to u32 and reuse the 32-bit wrapper, which binds the
    // underlying LLVM intrinsic.
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the 32-bit variant; the i32 result is narrowed to i16 with
    // a truncating `as` cast.
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the 32-bit variant; the u32 result is narrowed to u16 with
    // a truncating `as` cast.
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    // N is the number of fractional bits, restricted to 1..=16 at compile time.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Rust float-to-int `as` casts saturate: out-of-range values clamp to the
    // integer type's bounds and NaN maps to 0.
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Rust float-to-int `as` casts saturate: out-of-range values clamp to the
    // integer type's bounds and NaN maps to 0.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Rust float-to-int `as` casts saturate: out-of-range values clamp to the
    // integer type's bounds and NaN maps to 0.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Rust float-to-int `as` casts saturate: negative and out-of-range values
    // clamp to the unsigned type's bounds and NaN maps to 0.
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Rust float-to-int `as` casts saturate: negative and out-of-range values
    // clamp to the unsigned type's bounds and NaN maps to 0.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Rust float-to-int `as` casts saturate: negative and out-of-range values
    // clamp to the unsigned type's bounds and NaN maps to 0.
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required target features are enabled on this function.
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required target features are enabled on this function.
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required target features are enabled on this function.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required target features are enabled on this function.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // Delegate to the 32-bit variant; the i32 result is narrowed to i16 with
    // a truncating `as` cast.
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // Delegate to the 32-bit variant; the u32 result is narrowed to u16 with
    // a truncating `as` cast.
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // target features are enabled on this function.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // `neon` target feature is enabled on this function.
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // `neon` target feature is enabled on this function.
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // `neon` target feature is enabled on this function.
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the FFI call passes and returns plain values only; the required
    // `neon` target feature is enabled on this function.
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required target features are enabled on this function.
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required target features are enabled on this function.
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required `neon` target feature is enabled on this function.
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the FFI call passes and returns plain vector values only; the
    // required target features are enabled on this function.
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    // Delegates to the 32-bit conversion and narrows the result with `as`.
    // NOTE(review): f16 can represent values up to 65504, so results above
    // i16::MAX wrap on this cast rather than saturate — confirm this matches
    // ACLE's intended behavior for vcvtnh_s16_f16.
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    // Delegates to the 32-bit conversion and narrows with `as`; the maximum
    // finite f16 value (65504) fits in u16, so the narrowing cannot wrap.
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    // Delegates to the 32-bit conversion and narrows the result with `as`.
    // NOTE(review): f16 can represent values up to 65504, so results above
    // i16::MAX wrap on this cast rather than saturate — confirm this matches
    // ACLE's intended behavior for vcvtph_s16_f16.
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    // Delegates to the 32-bit conversion and narrows with `as`; the maximum
    // finite f16 value (65504) fits in u16, so the narrowing cannot wrap.
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: guarded by the `neon,fp16` `#[target_feature]` gate above; the
    // intrinsic only reads its value argument.
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; the intrinsic
    // only reads its value argument.
    unsafe { _vcvtpd_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    // Plain Rust numeric cast; `assert_instr` above checks it lowers to `ucvtf`.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    // Plain Rust numeric cast; `assert_instr` above checks it lowers to `ucvtf`.
    a as f64
}
9200#[doc = "Fixed-point convert to floating-point"]
9201#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9202#[inline]
9203#[target_feature(enable = "neon")]
9204#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9205#[rustc_legacy_const_generics(1)]
9206#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9207pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9208    static_assert!(N >= 1 && N <= 64);
9209    unsafe extern "unadjusted" {
9210        #[cfg_attr(
9211            any(target_arch = "aarch64", target_arch = "arm64ec"),
9212            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9213        )]
9214        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9215    }
9216    unsafe { _vcvts_n_f32_s32(a, N) }
9217}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    // N is the number of fractional bits; 1..=64 is valid for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; N is validated by
    // the static assertion before reaching the intrinsic.
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    // N is the number of fractional bits; 1..=32 is valid for a 32-bit source.
    static_assert!(N >= 1 && N <= 32);
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; N is validated by
    // the static assertion before reaching the intrinsic.
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    // N is the number of fractional bits; 1..=64 is valid for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; N is validated by
    // the static assertion before reaching the intrinsic.
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    // N is the number of fractional bits; 1..=32 is valid for a 32-bit result.
    static_assert!(N >= 1 && N <= 32);
    // Raw LLVM builtin; `link_name` binds it to the instruction checked by
    // `assert_instr` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    // SAFETY: guarded by the `#[target_feature]` gate above; N is validated by
    // the static assertion before reaching the intrinsic.
    unsafe { _vcvts_n_s32_f32(a, N) }
}
9290#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9291#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
9292#[inline]
9293#[target_feature(enable = "neon")]
9294#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
9295#[rustc_legacy_const_generics(1)]
9296#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9297pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
9298    static_assert!(N >= 1 && N <= 64);
9299    unsafe extern "unadjusted" {
9300        #[cfg_attr(
9301            any(target_arch = "aarch64", target_arch = "arm64ec"),
9302            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
9303        )]
9304        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
9305    }
9306    unsafe { _vcvtd_n_s64_f64(a, N) }
9307}
9308#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
9310#[inline]
9311#[target_feature(enable = "neon")]
9312#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9313#[rustc_legacy_const_generics(1)]
9314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9315pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
9316    static_assert!(N >= 1 && N <= 32);
9317    unsafe extern "unadjusted" {
9318        #[cfg_attr(
9319            any(target_arch = "aarch64", target_arch = "arm64ec"),
9320            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
9321        )]
9322        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
9323    }
9324    unsafe { _vcvts_n_u32_f32(a, N) }
9325}
9326#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9327#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
9328#[inline]
9329#[target_feature(enable = "neon")]
9330#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9331#[rustc_legacy_const_generics(1)]
9332#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9333pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
9334    static_assert!(N >= 1 && N <= 64);
9335    unsafe extern "unadjusted" {
9336        #[cfg_attr(
9337            any(target_arch = "aarch64", target_arch = "arm64ec"),
9338            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
9339        )]
9340        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
9341    }
9342    unsafe { _vcvtd_n_u64_f64(a, N) }
9343}
9344#[doc = "Fixed-point convert to floating-point"]
9345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
9346#[inline]
9347#[target_feature(enable = "neon")]
9348#[cfg_attr(test, assert_instr(fcvtzs))]
9349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9350pub fn vcvts_s32_f32(a: f32) -> i32 {
9351    a as i32
9352}
9353#[doc = "Fixed-point convert to floating-point"]
9354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
9355#[inline]
9356#[target_feature(enable = "neon")]
9357#[cfg_attr(test, assert_instr(fcvtzs))]
9358#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9359pub fn vcvtd_s64_f64(a: f64) -> i64 {
9360    a as i64
9361}
9362#[doc = "Fixed-point convert to floating-point"]
9363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
9364#[inline]
9365#[target_feature(enable = "neon")]
9366#[cfg_attr(test, assert_instr(fcvtzu))]
9367#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9368pub fn vcvts_u32_f32(a: f32) -> u32 {
9369    a as u32
9370}
9371#[doc = "Fixed-point convert to floating-point"]
9372#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
9373#[inline]
9374#[target_feature(enable = "neon")]
9375#[cfg_attr(test, assert_instr(fcvtzu))]
9376#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9377pub fn vcvtd_u64_f64(a: f64) -> u64 {
9378    a as u64
9379}
9380#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9381#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
9382#[inline]
9383#[target_feature(enable = "neon")]
9384#[cfg_attr(test, assert_instr(fcvtxn))]
9385#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9386pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
9387    unsafe extern "unadjusted" {
9388        #[cfg_attr(
9389            any(target_arch = "aarch64", target_arch = "arm64ec"),
9390            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
9391        )]
9392        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
9393    }
9394    unsafe { _vcvtx_f32_f64(a) }
9395}
9396#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9397#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
9398#[inline]
9399#[target_feature(enable = "neon")]
9400#[cfg_attr(test, assert_instr(fcvtxn2))]
9401#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9402pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
9403    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
9404}
9405#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9406#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
9407#[inline]
9408#[target_feature(enable = "neon")]
9409#[cfg_attr(test, assert_instr(fcvtxn))]
9410#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9411pub fn vcvtxd_f32_f64(a: f64) -> f32 {
9412    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
9413}
9414#[doc = "Divide"]
9415#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
9416#[inline]
9417#[target_feature(enable = "neon,fp16")]
9418#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9419#[cfg(not(target_arch = "arm64ec"))]
9420#[cfg_attr(test, assert_instr(fdiv))]
9421pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
9422    unsafe { simd_div(a, b) }
9423}
9424#[doc = "Divide"]
9425#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
9426#[inline]
9427#[target_feature(enable = "neon,fp16")]
9428#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9429#[cfg(not(target_arch = "arm64ec"))]
9430#[cfg_attr(test, assert_instr(fdiv))]
9431pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
9432    unsafe { simd_div(a, b) }
9433}
9434#[doc = "Divide"]
9435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
9436#[inline]
9437#[target_feature(enable = "neon")]
9438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9439#[cfg_attr(test, assert_instr(fdiv))]
9440pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
9441    unsafe { simd_div(a, b) }
9442}
9443#[doc = "Divide"]
9444#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
9445#[inline]
9446#[target_feature(enable = "neon")]
9447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9448#[cfg_attr(test, assert_instr(fdiv))]
9449pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
9450    unsafe { simd_div(a, b) }
9451}
9452#[doc = "Divide"]
9453#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
9454#[inline]
9455#[target_feature(enable = "neon")]
9456#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9457#[cfg_attr(test, assert_instr(fdiv))]
9458pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
9459    unsafe { simd_div(a, b) }
9460}
9461#[doc = "Divide"]
9462#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
9463#[inline]
9464#[target_feature(enable = "neon")]
9465#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9466#[cfg_attr(test, assert_instr(fdiv))]
9467pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
9468    unsafe { simd_div(a, b) }
9469}
9470#[doc = "Divide"]
9471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
9472#[inline]
9473#[target_feature(enable = "neon,fp16")]
9474#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9475#[cfg(not(target_arch = "arm64ec"))]
9476#[cfg_attr(test, assert_instr(nop))]
9477pub fn vdivh_f16(a: f16, b: f16) -> f16 {
9478    a / b
9479}
9480#[doc = "Dot product arithmetic (indexed)"]
9481#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"]
9482#[inline]
9483#[target_feature(enable = "neon,dotprod")]
9484#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
9485#[rustc_legacy_const_generics(3)]
9486#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9487pub fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
9488    static_assert_uimm_bits!(LANE, 2);
9489    unsafe {
9490        let c: int32x4_t = transmute(c);
9491        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
9492        vdot_s32(a, b, transmute(c))
9493    }
9494}
9495#[doc = "Dot product arithmetic (indexed)"]
9496#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"]
9497#[inline]
9498#[target_feature(enable = "neon,dotprod")]
9499#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
9500#[rustc_legacy_const_generics(3)]
9501#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9502pub fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
9503    static_assert_uimm_bits!(LANE, 2);
9504    unsafe {
9505        let c: int32x4_t = transmute(c);
9506        let c: int32x4_t =
9507            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
9508        vdotq_s32(a, b, transmute(c))
9509    }
9510}
9511#[doc = "Dot product arithmetic (indexed)"]
9512#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"]
9513#[inline]
9514#[target_feature(enable = "neon,dotprod")]
9515#[cfg_attr(test, assert_instr(udot, LANE = 0))]
9516#[rustc_legacy_const_generics(3)]
9517#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9518pub fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
9519    static_assert_uimm_bits!(LANE, 2);
9520    unsafe {
9521        let c: uint32x4_t = transmute(c);
9522        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
9523        vdot_u32(a, b, transmute(c))
9524    }
9525}
9526#[doc = "Dot product arithmetic (indexed)"]
9527#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"]
9528#[inline]
9529#[target_feature(enable = "neon,dotprod")]
9530#[cfg_attr(test, assert_instr(udot, LANE = 0))]
9531#[rustc_legacy_const_generics(3)]
9532#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9533pub fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
9534    static_assert_uimm_bits!(LANE, 2);
9535    unsafe {
9536        let c: uint32x4_t = transmute(c);
9537        let c: uint32x4_t =
9538            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
9539        vdotq_u32(a, b, transmute(c))
9540    }
9541}
9542#[doc = "Set all vector lanes to the same value"]
9543#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
9544#[inline]
9545#[target_feature(enable = "neon")]
9546#[cfg_attr(test, assert_instr(nop, N = 0))]
9547#[rustc_legacy_const_generics(1)]
9548#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9549pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
9550    static_assert!(N == 0);
9551    a
9552}
9553#[doc = "Set all vector lanes to the same value"]
9554#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
9555#[inline]
9556#[target_feature(enable = "neon")]
9557#[cfg_attr(test, assert_instr(nop, N = 0))]
9558#[rustc_legacy_const_generics(1)]
9559#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9560pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
9561    static_assert!(N == 0);
9562    a
9563}
9564#[doc = "Set all vector lanes to the same value"]
9565#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
9566#[inline]
9567#[target_feature(enable = "neon")]
9568#[cfg_attr(test, assert_instr(nop, N = 1))]
9569#[rustc_legacy_const_generics(1)]
9570#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9571pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
9572    static_assert_uimm_bits!(N, 1);
9573    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
9574}
9575#[doc = "Set all vector lanes to the same value"]
9576#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
9577#[inline]
9578#[target_feature(enable = "neon")]
9579#[cfg_attr(test, assert_instr(nop, N = 1))]
9580#[rustc_legacy_const_generics(1)]
9581#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9582pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
9583    static_assert_uimm_bits!(N, 1);
9584    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
9585}
9586#[doc = "Set all vector lanes to the same value"]
9587#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
9588#[inline]
9589#[target_feature(enable = "neon")]
9590#[cfg_attr(test, assert_instr(nop, N = 4))]
9591#[rustc_legacy_const_generics(1)]
9592#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9593pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
9594    static_assert_uimm_bits!(N, 3);
9595    unsafe { simd_extract!(a, N as u32) }
9596}
9597#[doc = "Set all vector lanes to the same value"]
9598#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
9599#[inline]
9600#[target_feature(enable = "neon")]
9601#[cfg_attr(test, assert_instr(nop, N = 4))]
9602#[rustc_legacy_const_generics(1)]
9603#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9604pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
9605    static_assert_uimm_bits!(N, 3);
9606    unsafe { simd_extract!(a, N as u32) }
9607}
9608#[doc = "Set all vector lanes to the same value"]
9609#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
9610#[inline]
9611#[target_feature(enable = "neon")]
9612#[cfg_attr(test, assert_instr(nop, N = 4))]
9613#[rustc_legacy_const_generics(1)]
9614#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9615pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
9616    static_assert_uimm_bits!(N, 3);
9617    unsafe { simd_extract!(a, N as u32) }
9618}
9619#[doc = "Set all vector lanes to the same value"]
9620#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
9621#[inline]
9622#[target_feature(enable = "neon")]
9623#[cfg_attr(test, assert_instr(nop, N = 4))]
9624#[rustc_legacy_const_generics(1)]
9625#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9626pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
9627    static_assert_uimm_bits!(N, 3);
9628    unsafe { simd_extract!(a, N as u32) }
9629}
9630#[doc = "Set all vector lanes to the same value"]
9631#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
9632#[inline]
9633#[target_feature(enable = "neon")]
9634#[cfg_attr(test, assert_instr(nop, N = 4))]
9635#[rustc_legacy_const_generics(1)]
9636#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9637pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
9638    static_assert_uimm_bits!(N, 3);
9639    unsafe { simd_extract!(a, N as u32) }
9640}
9641#[doc = "Set all vector lanes to the same value"]
9642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
9643#[inline]
9644#[target_feature(enable = "neon")]
9645#[cfg_attr(test, assert_instr(nop, N = 4))]
9646#[rustc_legacy_const_generics(1)]
9647#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9648pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
9649    static_assert_uimm_bits!(N, 3);
9650    unsafe { simd_extract!(a, N as u32) }
9651}
9652#[doc = "Extract an element from a vector"]
9653#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
9654#[inline]
9655#[target_feature(enable = "neon")]
9656#[cfg_attr(test, assert_instr(nop, N = 8))]
9657#[rustc_legacy_const_generics(1)]
9658#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9659pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
9660    static_assert_uimm_bits!(N, 4);
9661    unsafe { simd_extract!(a, N as u32) }
9662}
9663#[doc = "Extract an element from a vector"]
9664#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
9665#[inline]
9666#[target_feature(enable = "neon")]
9667#[cfg_attr(test, assert_instr(nop, N = 8))]
9668#[rustc_legacy_const_generics(1)]
9669#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9670pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
9671    static_assert_uimm_bits!(N, 4);
9672    unsafe { simd_extract!(a, N as u32) }
9673}
9674#[doc = "Extract an element from a vector"]
9675#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
9676#[inline]
9677#[target_feature(enable = "neon")]
9678#[cfg_attr(test, assert_instr(nop, N = 8))]
9679#[rustc_legacy_const_generics(1)]
9680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9681pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
9682    static_assert_uimm_bits!(N, 4);
9683    unsafe { simd_extract!(a, N as u32) }
9684}
9685#[doc = "Set all vector lanes to the same value"]
9686#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
9687#[inline]
9688#[target_feature(enable = "neon")]
9689#[cfg_attr(test, assert_instr(nop, N = 0))]
9690#[rustc_legacy_const_generics(1)]
9691#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9692pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
9693    static_assert!(N == 0);
9694    unsafe { simd_extract!(a, N as u32) }
9695}
9696#[doc = "Set all vector lanes to the same value"]
9697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
9698#[inline]
9699#[target_feature(enable = "neon")]
9700#[cfg_attr(test, assert_instr(nop, N = 0))]
9701#[rustc_legacy_const_generics(1)]
9702#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9703pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
9704    static_assert!(N == 0);
9705    unsafe { simd_extract!(a, N as u32) }
9706}
9707#[doc = "Set all vector lanes to the same value"]
9708#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
9709#[inline]
9710#[target_feature(enable = "neon")]
9711#[cfg_attr(test, assert_instr(nop, N = 0))]
9712#[rustc_legacy_const_generics(1)]
9713#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9714pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
9715    static_assert!(N == 0);
9716    unsafe { simd_extract!(a, N as u32) }
9717}
9718#[doc = "Set all vector lanes to the same value"]
9719#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
9720#[inline]
9721#[cfg_attr(test, assert_instr(nop, N = 2))]
9722#[rustc_legacy_const_generics(1)]
9723#[target_feature(enable = "neon,fp16")]
9724#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9725#[cfg(not(target_arch = "arm64ec"))]
9726pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
9727    static_assert_uimm_bits!(N, 2);
9728    unsafe { simd_extract!(a, N as u32) }
9729}
9730#[doc = "Extract an element from a vector"]
9731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9732#[inline]
9733#[cfg_attr(test, assert_instr(nop, N = 4))]
9734#[rustc_legacy_const_generics(1)]
9735#[target_feature(enable = "neon,fp16")]
9736#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9737#[cfg(not(target_arch = "arm64ec"))]
9738pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
9739    static_assert_uimm_bits!(N, 4);
9740    unsafe { simd_extract!(a, N as u32) }
9741}
9742#[doc = "Set all vector lanes to the same value"]
9743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
9744#[inline]
9745#[target_feature(enable = "neon")]
9746#[cfg_attr(test, assert_instr(dup, N = 0))]
9747#[rustc_legacy_const_generics(1)]
9748#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9749pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
9750    static_assert!(N == 0);
9751    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9752}
9753#[doc = "Set all vector lanes to the same value"]
9754#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
9755#[inline]
9756#[target_feature(enable = "neon")]
9757#[cfg_attr(test, assert_instr(dup, N = 0))]
9758#[rustc_legacy_const_generics(1)]
9759#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9760pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
9761    static_assert!(N == 0);
9762    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9763}
9764#[doc = "Set all vector lanes to the same value"]
9765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
9766#[inline]
9767#[target_feature(enable = "neon")]
9768#[cfg_attr(test, assert_instr(dup, N = 1))]
9769#[rustc_legacy_const_generics(1)]
9770#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9771pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
9772    static_assert_uimm_bits!(N, 1);
9773    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9774}
9775#[doc = "Set all vector lanes to the same value"]
9776#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
9777#[inline]
9778#[target_feature(enable = "neon")]
9779#[cfg_attr(test, assert_instr(dup, N = 1))]
9780#[rustc_legacy_const_generics(1)]
9781#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9782pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
9783    static_assert_uimm_bits!(N, 1);
9784    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9785}
9786#[doc = "Set all vector lanes to the same value"]
9787#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
9788#[inline]
9789#[target_feature(enable = "neon")]
9790#[cfg_attr(test, assert_instr(nop, N = 1))]
9791#[rustc_legacy_const_generics(1)]
9792#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9793pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
9794    static_assert_uimm_bits!(N, 1);
9795    unsafe { simd_extract!(a, N as u32) }
9796}
9797#[doc = "Set all vector lanes to the same value"]
9798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
9799#[inline]
9800#[target_feature(enable = "neon")]
9801#[cfg_attr(test, assert_instr(nop, N = 1))]
9802#[rustc_legacy_const_generics(1)]
9803#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9804pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
9805    static_assert_uimm_bits!(N, 1);
9806    unsafe { simd_extract!(a, N as u32) }
9807}
9808#[doc = "Set all vector lanes to the same value"]
9809#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
9810#[inline]
9811#[target_feature(enable = "neon")]
9812#[cfg_attr(test, assert_instr(nop, N = 1))]
9813#[rustc_legacy_const_generics(1)]
9814#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9815pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
9816    static_assert_uimm_bits!(N, 1);
9817    unsafe { simd_extract!(a, N as u32) }
9818}
9819#[doc = "Set all vector lanes to the same value"]
9820#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
9821#[inline]
9822#[target_feature(enable = "neon")]
9823#[cfg_attr(test, assert_instr(nop, N = 1))]
9824#[rustc_legacy_const_generics(1)]
9825#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9826pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
9827    static_assert_uimm_bits!(N, 1);
9828    unsafe { simd_extract!(a, N as u32) }
9829}
9830#[doc = "Set all vector lanes to the same value"]
9831#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
9832#[inline]
9833#[target_feature(enable = "neon")]
9834#[cfg_attr(test, assert_instr(nop, N = 1))]
9835#[rustc_legacy_const_generics(1)]
9836#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9837pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
9838    static_assert_uimm_bits!(N, 1);
9839    unsafe { simd_extract!(a, N as u32) }
9840}
9841#[doc = "Set all vector lanes to the same value"]
9842#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
9843#[inline]
9844#[target_feature(enable = "neon")]
9845#[cfg_attr(test, assert_instr(nop, N = 1))]
9846#[rustc_legacy_const_generics(1)]
9847#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9848pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
9849    static_assert_uimm_bits!(N, 1);
9850    unsafe { simd_extract!(a, N as u32) }
9851}
9852#[doc = "Set all vector lanes to the same value"]
9853#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
9854#[inline]
9855#[target_feature(enable = "neon")]
9856#[cfg_attr(test, assert_instr(nop, N = 2))]
9857#[rustc_legacy_const_generics(1)]
9858#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9859pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
9860    static_assert_uimm_bits!(N, 2);
9861    unsafe { simd_extract!(a, N as u32) }
9862}
9863#[doc = "Set all vector lanes to the same value"]
9864#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
9865#[inline]
9866#[target_feature(enable = "neon")]
9867#[cfg_attr(test, assert_instr(nop, N = 2))]
9868#[rustc_legacy_const_generics(1)]
9869#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9870pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
9871    static_assert_uimm_bits!(N, 2);
9872    unsafe { simd_extract!(a, N as u32) }
9873}
9874#[doc = "Set all vector lanes to the same value"]
9875#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
9876#[inline]
9877#[target_feature(enable = "neon")]
9878#[cfg_attr(test, assert_instr(nop, N = 2))]
9879#[rustc_legacy_const_generics(1)]
9880#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9881pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
9882    static_assert_uimm_bits!(N, 2);
9883    unsafe { simd_extract!(a, N as u32) }
9884}
9885#[doc = "Set all vector lanes to the same value"]
9886#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
9887#[inline]
9888#[target_feature(enable = "neon")]
9889#[cfg_attr(test, assert_instr(nop, N = 2))]
9890#[rustc_legacy_const_generics(1)]
9891#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9892pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
9893    static_assert_uimm_bits!(N, 2);
9894    unsafe { simd_extract!(a, N as u32) }
9895}
9896#[doc = "Set all vector lanes to the same value"]
9897#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
9898#[inline]
9899#[target_feature(enable = "neon")]
9900#[cfg_attr(test, assert_instr(nop, N = 2))]
9901#[rustc_legacy_const_generics(1)]
9902#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9903pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
9904    static_assert_uimm_bits!(N, 2);
9905    unsafe { simd_extract!(a, N as u32) }
9906}
9907#[doc = "Set all vector lanes to the same value"]
9908#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
9909#[inline]
9910#[target_feature(enable = "neon")]
9911#[cfg_attr(test, assert_instr(nop, N = 2))]
9912#[rustc_legacy_const_generics(1)]
9913#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9914pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
9915    static_assert_uimm_bits!(N, 2);
9916    unsafe { simd_extract!(a, N as u32) }
9917}
9918#[doc = "Three-way exclusive OR"]
9919#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
9920#[inline]
9921#[target_feature(enable = "neon,sha3")]
9922#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9923#[cfg_attr(test, assert_instr(eor3))]
9924pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
9925    unsafe extern "unadjusted" {
9926        #[cfg_attr(
9927            any(target_arch = "aarch64", target_arch = "arm64ec"),
9928            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
9929        )]
9930        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
9931    }
9932    unsafe { _veor3q_s8(a, b, c) }
9933}
9934#[doc = "Three-way exclusive OR"]
9935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
9936#[inline]
9937#[target_feature(enable = "neon,sha3")]
9938#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9939#[cfg_attr(test, assert_instr(eor3))]
9940pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
9941    unsafe extern "unadjusted" {
9942        #[cfg_attr(
9943            any(target_arch = "aarch64", target_arch = "arm64ec"),
9944            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
9945        )]
9946        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
9947    }
9948    unsafe { _veor3q_s16(a, b, c) }
9949}
9950#[doc = "Three-way exclusive OR"]
9951#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
9952#[inline]
9953#[target_feature(enable = "neon,sha3")]
9954#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9955#[cfg_attr(test, assert_instr(eor3))]
9956pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
9957    unsafe extern "unadjusted" {
9958        #[cfg_attr(
9959            any(target_arch = "aarch64", target_arch = "arm64ec"),
9960            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
9961        )]
9962        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
9963    }
9964    unsafe { _veor3q_s32(a, b, c) }
9965}
9966#[doc = "Three-way exclusive OR"]
9967#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
9968#[inline]
9969#[target_feature(enable = "neon,sha3")]
9970#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9971#[cfg_attr(test, assert_instr(eor3))]
9972pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
9973    unsafe extern "unadjusted" {
9974        #[cfg_attr(
9975            any(target_arch = "aarch64", target_arch = "arm64ec"),
9976            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
9977        )]
9978        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
9979    }
9980    unsafe { _veor3q_s64(a, b, c) }
9981}
9982#[doc = "Three-way exclusive OR"]
9983#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
9984#[inline]
9985#[target_feature(enable = "neon,sha3")]
9986#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
9987#[cfg_attr(test, assert_instr(eor3))]
9988pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
9989    unsafe extern "unadjusted" {
9990        #[cfg_attr(
9991            any(target_arch = "aarch64", target_arch = "arm64ec"),
9992            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
9993        )]
9994        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
9995    }
9996    unsafe { _veor3q_u8(a, b, c) }
9997}
9998#[doc = "Three-way exclusive OR"]
9999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
10000#[inline]
10001#[target_feature(enable = "neon,sha3")]
10002#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
10003#[cfg_attr(test, assert_instr(eor3))]
10004pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
10005    unsafe extern "unadjusted" {
10006        #[cfg_attr(
10007            any(target_arch = "aarch64", target_arch = "arm64ec"),
10008            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
10009        )]
10010        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
10011    }
10012    unsafe { _veor3q_u16(a, b, c) }
10013}
10014#[doc = "Three-way exclusive OR"]
10015#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
10016#[inline]
10017#[target_feature(enable = "neon,sha3")]
10018#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
10019#[cfg_attr(test, assert_instr(eor3))]
10020pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
10021    unsafe extern "unadjusted" {
10022        #[cfg_attr(
10023            any(target_arch = "aarch64", target_arch = "arm64ec"),
10024            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
10025        )]
10026        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
10027    }
10028    unsafe { _veor3q_u32(a, b, c) }
10029}
10030#[doc = "Three-way exclusive OR"]
10031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
10032#[inline]
10033#[target_feature(enable = "neon,sha3")]
10034#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
10035#[cfg_attr(test, assert_instr(eor3))]
10036pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
10037    unsafe extern "unadjusted" {
10038        #[cfg_attr(
10039            any(target_arch = "aarch64", target_arch = "arm64ec"),
10040            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
10041        )]
10042        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
10043    }
10044    unsafe { _veor3q_u64(a, b, c) }
10045}
10046#[doc = "Extract vector from pair of vectors"]
10047#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
10048#[inline]
10049#[target_feature(enable = "neon")]
10050#[cfg_attr(test, assert_instr(ext, N = 1))]
10051#[rustc_legacy_const_generics(2)]
10052#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10053pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
10054    static_assert_uimm_bits!(N, 1);
10055    unsafe {
10056        match N & 0b1 {
10057            0 => simd_shuffle!(a, b, [0, 1]),
10058            1 => simd_shuffle!(a, b, [1, 2]),
10059            _ => unreachable_unchecked(),
10060        }
10061    }
10062}
10063#[doc = "Extract vector from pair of vectors"]
10064#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
10065#[inline]
10066#[target_feature(enable = "neon")]
10067#[cfg_attr(test, assert_instr(ext, N = 1))]
10068#[rustc_legacy_const_generics(2)]
10069#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10070pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
10071    static_assert_uimm_bits!(N, 1);
10072    unsafe {
10073        match N & 0b1 {
10074            0 => simd_shuffle!(a, b, [0, 1]),
10075            1 => simd_shuffle!(a, b, [1, 2]),
10076            _ => unreachable_unchecked(),
10077        }
10078    }
10079}
10080#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
10081#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
10082#[inline]
10083#[target_feature(enable = "neon")]
10084#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10085#[cfg_attr(test, assert_instr(fmadd))]
10086pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
10087    unsafe { simd_fma(b, c, a) }
10088}
10089#[doc = "Floating-point fused multiply-add to accumulator"]
10090#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
10091#[inline]
10092#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10093#[rustc_legacy_const_generics(3)]
10094#[target_feature(enable = "neon,fp16")]
10095#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10096#[cfg(not(target_arch = "arm64ec"))]
10097pub fn vfma_lane_f16<const LANE: i32>(
10098    a: float16x4_t,
10099    b: float16x4_t,
10100    c: float16x4_t,
10101) -> float16x4_t {
10102    static_assert_uimm_bits!(LANE, 2);
10103    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10104}
10105#[doc = "Floating-point fused multiply-add to accumulator"]
10106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
10107#[inline]
10108#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10109#[rustc_legacy_const_generics(3)]
10110#[target_feature(enable = "neon,fp16")]
10111#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10112#[cfg(not(target_arch = "arm64ec"))]
10113pub fn vfma_laneq_f16<const LANE: i32>(
10114    a: float16x4_t,
10115    b: float16x4_t,
10116    c: float16x8_t,
10117) -> float16x4_t {
10118    static_assert_uimm_bits!(LANE, 3);
10119    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10120}
10121#[doc = "Floating-point fused multiply-add to accumulator"]
10122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
10123#[inline]
10124#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10125#[rustc_legacy_const_generics(3)]
10126#[target_feature(enable = "neon,fp16")]
10127#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10128#[cfg(not(target_arch = "arm64ec"))]
10129pub fn vfmaq_lane_f16<const LANE: i32>(
10130    a: float16x8_t,
10131    b: float16x8_t,
10132    c: float16x4_t,
10133) -> float16x8_t {
10134    static_assert_uimm_bits!(LANE, 2);
10135    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10136}
10137#[doc = "Floating-point fused multiply-add to accumulator"]
10138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
10139#[inline]
10140#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10141#[rustc_legacy_const_generics(3)]
10142#[target_feature(enable = "neon,fp16")]
10143#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10144#[cfg(not(target_arch = "arm64ec"))]
10145pub fn vfmaq_laneq_f16<const LANE: i32>(
10146    a: float16x8_t,
10147    b: float16x8_t,
10148    c: float16x8_t,
10149) -> float16x8_t {
10150    static_assert_uimm_bits!(LANE, 3);
10151    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10152}
10153#[doc = "Floating-point fused multiply-add to accumulator"]
10154#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
10155#[inline]
10156#[target_feature(enable = "neon")]
10157#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10158#[rustc_legacy_const_generics(3)]
10159#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10160pub fn vfma_lane_f32<const LANE: i32>(
10161    a: float32x2_t,
10162    b: float32x2_t,
10163    c: float32x2_t,
10164) -> float32x2_t {
10165    static_assert_uimm_bits!(LANE, 1);
10166    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10167}
10168#[doc = "Floating-point fused multiply-add to accumulator"]
10169#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
10170#[inline]
10171#[target_feature(enable = "neon")]
10172#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10173#[rustc_legacy_const_generics(3)]
10174#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10175pub fn vfma_laneq_f32<const LANE: i32>(
10176    a: float32x2_t,
10177    b: float32x2_t,
10178    c: float32x4_t,
10179) -> float32x2_t {
10180    static_assert_uimm_bits!(LANE, 2);
10181    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10182}
10183#[doc = "Floating-point fused multiply-add to accumulator"]
10184#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
10185#[inline]
10186#[target_feature(enable = "neon")]
10187#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10188#[rustc_legacy_const_generics(3)]
10189#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10190pub fn vfmaq_lane_f32<const LANE: i32>(
10191    a: float32x4_t,
10192    b: float32x4_t,
10193    c: float32x2_t,
10194) -> float32x4_t {
10195    static_assert_uimm_bits!(LANE, 1);
10196    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10197}
10198#[doc = "Floating-point fused multiply-add to accumulator"]
10199#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
10200#[inline]
10201#[target_feature(enable = "neon")]
10202#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10203#[rustc_legacy_const_generics(3)]
10204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10205pub fn vfmaq_laneq_f32<const LANE: i32>(
10206    a: float32x4_t,
10207    b: float32x4_t,
10208    c: float32x4_t,
10209) -> float32x4_t {
10210    static_assert_uimm_bits!(LANE, 2);
10211    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10212}
10213#[doc = "Floating-point fused multiply-add to accumulator"]
10214#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
10215#[inline]
10216#[target_feature(enable = "neon")]
10217#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10218#[rustc_legacy_const_generics(3)]
10219#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10220pub fn vfmaq_laneq_f64<const LANE: i32>(
10221    a: float64x2_t,
10222    b: float64x2_t,
10223    c: float64x2_t,
10224) -> float64x2_t {
10225    static_assert_uimm_bits!(LANE, 1);
10226    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
10227}
10228#[doc = "Floating-point fused multiply-add to accumulator"]
10229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
10230#[inline]
10231#[target_feature(enable = "neon")]
10232#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10233#[rustc_legacy_const_generics(3)]
10234#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10235pub fn vfma_lane_f64<const LANE: i32>(
10236    a: float64x1_t,
10237    b: float64x1_t,
10238    c: float64x1_t,
10239) -> float64x1_t {
10240    static_assert!(LANE == 0);
10241    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
10242}
10243#[doc = "Floating-point fused multiply-add to accumulator"]
10244#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
10245#[inline]
10246#[target_feature(enable = "neon")]
10247#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10248#[rustc_legacy_const_generics(3)]
10249#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10250pub fn vfma_laneq_f64<const LANE: i32>(
10251    a: float64x1_t,
10252    b: float64x1_t,
10253    c: float64x2_t,
10254) -> float64x1_t {
10255    static_assert_uimm_bits!(LANE, 1);
10256    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
10257}
10258#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
10260#[inline]
10261#[target_feature(enable = "neon,fp16")]
10262#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10263#[cfg(not(target_arch = "arm64ec"))]
10264#[cfg_attr(test, assert_instr(fmla))]
10265pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
10266    vfma_f16(a, b, vdup_n_f16(c))
10267}
10268#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
10270#[inline]
10271#[target_feature(enable = "neon,fp16")]
10272#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10273#[cfg(not(target_arch = "arm64ec"))]
10274#[cfg_attr(test, assert_instr(fmla))]
10275pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
10276    vfmaq_f16(a, b, vdupq_n_f16(c))
10277}
10278#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
10279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
10280#[inline]
10281#[target_feature(enable = "neon")]
10282#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10283#[cfg_attr(test, assert_instr(fmadd))]
10284pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
10285    vfma_f64(a, b, vdup_n_f64(c))
10286}
10287#[doc = "Floating-point fused multiply-add to accumulator"]
10288#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
10289#[inline]
10290#[target_feature(enable = "neon")]
10291#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10292#[rustc_legacy_const_generics(3)]
10293#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10294pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
10295    static_assert!(LANE == 0);
10296    unsafe {
10297        let c: f64 = simd_extract!(c, LANE as u32);
10298        fmaf64(b, c, a)
10299    }
10300}
10301#[doc = "Floating-point fused multiply-add to accumulator"]
10302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
10303#[inline]
10304#[cfg_attr(test, assert_instr(fmadd))]
10305#[target_feature(enable = "neon,fp16")]
10306#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10307#[cfg(not(target_arch = "arm64ec"))]
10308pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
10309    fmaf16(b, c, a)
10310}
10311#[doc = "Floating-point fused multiply-add to accumulator"]
10312#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
10313#[inline]
10314#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10315#[rustc_legacy_const_generics(3)]
10316#[target_feature(enable = "neon,fp16")]
10317#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10318#[cfg(not(target_arch = "arm64ec"))]
10319pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
10320    static_assert_uimm_bits!(LANE, 2);
10321    unsafe {
10322        let c: f16 = simd_extract!(v, LANE as u32);
10323        vfmah_f16(a, b, c)
10324    }
10325}
10326#[doc = "Floating-point fused multiply-add to accumulator"]
10327#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
10328#[inline]
10329#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10330#[rustc_legacy_const_generics(3)]
10331#[target_feature(enable = "neon,fp16")]
10332#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10333#[cfg(not(target_arch = "arm64ec"))]
10334pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
10335    static_assert_uimm_bits!(LANE, 3);
10336    unsafe {
10337        let c: f16 = simd_extract!(v, LANE as u32);
10338        vfmah_f16(a, b, c)
10339    }
10340}
10341#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
10342#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
10343#[inline]
10344#[target_feature(enable = "neon")]
10345#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10346#[cfg_attr(test, assert_instr(fmla))]
10347pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
10348    unsafe { simd_fma(b, c, a) }
10349}
10350#[doc = "Floating-point fused multiply-add to accumulator"]
10351#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
10352#[inline]
10353#[target_feature(enable = "neon")]
10354#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
10355#[rustc_legacy_const_generics(3)]
10356#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10357pub fn vfmaq_lane_f64<const LANE: i32>(
10358    a: float64x2_t,
10359    b: float64x2_t,
10360    c: float64x1_t,
10361) -> float64x2_t {
10362    static_assert!(LANE == 0);
10363    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
10364}
10365#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
10366#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
10367#[inline]
10368#[target_feature(enable = "neon")]
10369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10370#[cfg_attr(test, assert_instr(fmla))]
10371pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
10372    vfmaq_f64(a, b, vdupq_n_f64(c))
10373}
10374#[doc = "Floating-point fused multiply-add to accumulator"]
10375#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
10376#[inline]
10377#[target_feature(enable = "neon")]
10378#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10379#[rustc_legacy_const_generics(3)]
10380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10381pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
10382    static_assert_uimm_bits!(LANE, 1);
10383    unsafe {
10384        let c: f32 = simd_extract!(c, LANE as u32);
10385        fmaf32(b, c, a)
10386    }
10387}
10388#[doc = "Floating-point fused multiply-add to accumulator"]
10389#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
10390#[inline]
10391#[target_feature(enable = "neon")]
10392#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10393#[rustc_legacy_const_generics(3)]
10394#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10395pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
10396    static_assert_uimm_bits!(LANE, 2);
10397    unsafe {
10398        let c: f32 = simd_extract!(c, LANE as u32);
10399        fmaf32(b, c, a)
10400    }
10401}
10402#[doc = "Floating-point fused multiply-add to accumulator"]
10403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
10404#[inline]
10405#[target_feature(enable = "neon")]
10406#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10407#[rustc_legacy_const_generics(3)]
10408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10409pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
10410    static_assert_uimm_bits!(LANE, 1);
10411    unsafe {
10412        let c: f64 = simd_extract!(c, LANE as u32);
10413        fmaf64(b, c, a)
10414    }
10415}
10416#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
10418#[inline]
10419#[target_feature(enable = "neon,fp16")]
10420#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10421#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10422#[cfg(not(target_arch = "arm64ec"))]
10423#[cfg_attr(test, assert_instr(fmlal2))]
10424pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10425    unsafe extern "unadjusted" {
10426        #[cfg_attr(
10427            any(target_arch = "aarch64", target_arch = "arm64ec"),
10428            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
10429        )]
10430        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10431    }
10432    unsafe { _vfmlal_high_f16(r, a, b) }
10433}
10434#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
10436#[inline]
10437#[target_feature(enable = "neon,fp16")]
10438#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10439#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10440#[cfg(not(target_arch = "arm64ec"))]
10441#[cfg_attr(test, assert_instr(fmlal2))]
10442pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10443    unsafe extern "unadjusted" {
10444        #[cfg_attr(
10445            any(target_arch = "aarch64", target_arch = "arm64ec"),
10446            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
10447        )]
10448        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10449    }
10450    unsafe { _vfmlalq_high_f16(r, a, b) }
10451}
10452#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10453#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
10454#[inline]
10455#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10456#[target_feature(enable = "neon,fp16")]
10457#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10458#[rustc_legacy_const_generics(3)]
10459#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10460#[cfg(not(target_arch = "arm64ec"))]
10461pub fn vfmlal_lane_high_f16<const LANE: i32>(
10462    r: float32x2_t,
10463    a: float16x4_t,
10464    b: float16x4_t,
10465) -> float32x2_t {
10466    static_assert_uimm_bits!(LANE, 2);
10467    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10468}
10469#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10470#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
10471#[inline]
10472#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10473#[target_feature(enable = "neon,fp16")]
10474#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10475#[rustc_legacy_const_generics(3)]
10476#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10477#[cfg(not(target_arch = "arm64ec"))]
10478pub fn vfmlal_laneq_high_f16<const LANE: i32>(
10479    r: float32x2_t,
10480    a: float16x4_t,
10481    b: float16x8_t,
10482) -> float32x2_t {
10483    static_assert_uimm_bits!(LANE, 3);
10484    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10485}
10486#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10487#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
10488#[inline]
10489#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10490#[target_feature(enable = "neon,fp16")]
10491#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10492#[rustc_legacy_const_generics(3)]
10493#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10494#[cfg(not(target_arch = "arm64ec"))]
10495pub fn vfmlalq_lane_high_f16<const LANE: i32>(
10496    r: float32x4_t,
10497    a: float16x8_t,
10498    b: float16x4_t,
10499) -> float32x4_t {
10500    static_assert_uimm_bits!(LANE, 2);
10501    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10502}
10503#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10504#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
10505#[inline]
10506#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
10507#[target_feature(enable = "neon,fp16")]
10508#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10509#[rustc_legacy_const_generics(3)]
10510#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10511#[cfg(not(target_arch = "arm64ec"))]
10512pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
10513    r: float32x4_t,
10514    a: float16x8_t,
10515    b: float16x8_t,
10516) -> float32x4_t {
10517    static_assert_uimm_bits!(LANE, 3);
10518    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10519}
10520#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
10522#[inline]
10523#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10524#[target_feature(enable = "neon,fp16")]
10525#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10526#[rustc_legacy_const_generics(3)]
10527#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10528#[cfg(not(target_arch = "arm64ec"))]
10529pub fn vfmlal_lane_low_f16<const LANE: i32>(
10530    r: float32x2_t,
10531    a: float16x4_t,
10532    b: float16x4_t,
10533) -> float32x2_t {
10534    static_assert_uimm_bits!(LANE, 2);
10535    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10536}
10537#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10538#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
10539#[inline]
10540#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10541#[target_feature(enable = "neon,fp16")]
10542#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10543#[rustc_legacy_const_generics(3)]
10544#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10545#[cfg(not(target_arch = "arm64ec"))]
10546pub fn vfmlal_laneq_low_f16<const LANE: i32>(
10547    r: float32x2_t,
10548    a: float16x4_t,
10549    b: float16x8_t,
10550) -> float32x2_t {
10551    static_assert_uimm_bits!(LANE, 3);
10552    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10553}
10554#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
10556#[inline]
10557#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10558#[target_feature(enable = "neon,fp16")]
10559#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10560#[rustc_legacy_const_generics(3)]
10561#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10562#[cfg(not(target_arch = "arm64ec"))]
10563pub fn vfmlalq_lane_low_f16<const LANE: i32>(
10564    r: float32x4_t,
10565    a: float16x8_t,
10566    b: float16x4_t,
10567) -> float32x4_t {
10568    static_assert_uimm_bits!(LANE, 2);
10569    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10570}
10571#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
10572#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
10573#[inline]
10574#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
10575#[target_feature(enable = "neon,fp16")]
10576#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10577#[rustc_legacy_const_generics(3)]
10578#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10579#[cfg(not(target_arch = "arm64ec"))]
10580pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
10581    r: float32x4_t,
10582    a: float16x8_t,
10583    b: float16x8_t,
10584) -> float32x4_t {
10585    static_assert_uimm_bits!(LANE, 3);
10586    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10587}
10588#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10589#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
10590#[inline]
10591#[target_feature(enable = "neon,fp16")]
10592#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10593#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10594#[cfg(not(target_arch = "arm64ec"))]
10595#[cfg_attr(test, assert_instr(fmlal))]
10596pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10597    unsafe extern "unadjusted" {
10598        #[cfg_attr(
10599            any(target_arch = "aarch64", target_arch = "arm64ec"),
10600            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
10601        )]
10602        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10603    }
10604    unsafe { _vfmlal_low_f16(r, a, b) }
10605}
10606#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
10607#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
10608#[inline]
10609#[target_feature(enable = "neon,fp16")]
10610#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10611#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10612#[cfg(not(target_arch = "arm64ec"))]
10613#[cfg_attr(test, assert_instr(fmlal))]
10614pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10615    unsafe extern "unadjusted" {
10616        #[cfg_attr(
10617            any(target_arch = "aarch64", target_arch = "arm64ec"),
10618            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
10619        )]
10620        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10621    }
10622    unsafe { _vfmlalq_low_f16(r, a, b) }
10623}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic; lowers to FMLSL2 (pinned by the
    // `assert_instr` attribute above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: required CPU features (neon, fp16, fhm) are demanded of the
    // caller by the `target_feature` attributes above.
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic; lowers to FMLSL2 (pinned by the
    // `assert_instr` attribute above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: required CPU features (neon, fp16, fhm) are demanded of the
    // caller by the `target_feature` attributes above.
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `b` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL2.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // LANE must fit in 3 bits: valid lanes of the 8-element `b` are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL2.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `b` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL2.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // LANE must fit in 3 bits: valid lanes of the 8-element `b` are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL2.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `b` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // LANE must fit in 3 bits: valid lanes of the 8-element `b` are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `b` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // LANE must fit in 3 bits: valid lanes of the 8-element `b` are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-by-vector form;
    // per `assert_instr` this still lowers to a single by-element FMLSL.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic; lowers to FMLSL (pinned by the
    // `assert_instr` attribute above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: required CPU features (neon, fp16, fhm) are demanded of the
    // caller by the `target_feature` attributes above.
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic; lowers to FMLSL (pinned by the
    // `assert_instr` attribute above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: required CPU features (neon, fp16, fhm) are demanded of the
    // caller by the `target_feature` attributes above.
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // fms(a, b, c) is implemented as fma(a, -b, c): negate `b` and reuse the
    // fused multiply-add, so the operation stays fused (single rounding).
    unsafe {
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `c` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // LANE must fit in 3 bits: valid lanes of the 8-element `c` are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `c` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE must fit in 3 bits: valid lanes of the 8-element `c` are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // LANE must fit in 1 bit: valid lanes of the 2-element `c` are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `c` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // LANE must fit in 1 bit: valid lanes of the 2-element `c` are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes of the 4-element `c` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // LANE must fit in 1 bit: valid lanes of the 2-element `c` are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` has a single lane, so only LANE == 0 is accepted.
    static_assert!(LANE == 0);
    // Splat the (only) lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a scalar FMSUB.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // LANE must fit in 1 bit: valid lanes of the 2-element `c` are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a scalar FMSUB.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Broadcast the scalar `c` to all lanes and reuse the vector-wide FMS.
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Broadcast the scalar `c` to all lanes and reuse the vector-wide FMS.
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar `c` to the single lane and reuse the vector FMS.
    vfms_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    // Scalar fms(a, b, c) == fma(a, -b, c): negate `b` and reuse the fused
    // multiply-add so the result keeps a single rounding step.
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // LANE must fit in 2 bits: valid lanes of the 4-element `v` are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Pull the selected lane out of `v`, then do the scalar FMS.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // LANE must fit in 3 bits: valid lanes of the 8-element `v` are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Pull the selected lane out of `v`, then do the scalar FMS.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // fms(a, b, c) is implemented as fma(a, -b, c): negate `b` lane-wise and
    // reuse the fused multiply-add, keeping a single rounding step.
    unsafe {
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `c` has a single lane, so only LANE == 0 is accepted.
    static_assert!(LANE == 0);
    // Splat the (only) lane of `c` and defer to the vector-wide FMS; per
    // `assert_instr` this lowers to a single by-element FMLS.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` to both lanes and reuse the vector-wide FMS.
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // fms == fma with `b` negated; LANE validation is delegated to the
    // by-element FMA this forwards to.
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // fms == fma with `b` negated; LANE validation is delegated to the
    // by-element FMA this forwards to.
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // fms == fma with `b` negated; LANE validation is delegated to the
    // by-element FMA this forwards to.
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // fms == fma with `b` negated; LANE validation is delegated to the
    // by-element FMA this forwards to.
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed beyond
    // what the pointer cast implies. `ptr` must be valid for reading the
    // full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    // Whole-vector load via `read_unaligned`: no alignment is assumed.
    // `ptr` must be valid for reading the full size of the returned vector.
    crate::ptr::read_unaligned(ptr.cast())
}
11281#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11282#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
11283#[doc = "## Safety"]
11284#[doc = "  * Neon instrinsic unsafe"]
11285#[inline]
11286#[target_feature(enable = "neon")]
11287#[cfg_attr(test, assert_instr(ldr))]
11288#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11289pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
11290    crate::ptr::read_unaligned(ptr.cast())
11291}
11292#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11293#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
11294#[doc = "## Safety"]
11295#[doc = "  * Neon instrinsic unsafe"]
11296#[inline]
11297#[target_feature(enable = "neon")]
11298#[cfg_attr(test, assert_instr(ldr))]
11299#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11300pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
11301    crate::ptr::read_unaligned(ptr.cast())
11302}
11303#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11304#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
11305#[doc = "## Safety"]
11306#[doc = "  * Neon instrinsic unsafe"]
11307#[inline]
11308#[target_feature(enable = "neon")]
11309#[cfg_attr(test, assert_instr(ldr))]
11310#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11311pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
11312    crate::ptr::read_unaligned(ptr.cast())
11313}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    // Unaligned vector-sized read; `assert_instr` above pins this to a single `ldr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    // Unaligned 128-bit read of 16 x u8.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    // Unaligned 64-bit read of 4 x u16.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    // Unaligned 128-bit read of 8 x u16.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    // Unaligned 64-bit read of 2 x u32.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    // Unaligned 128-bit read of 4 x u32.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    // Unaligned 64-bit read of 1 x u64.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    // Unaligned 128-bit read of 2 x u64.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    // Unaligned vector-sized read; `assert_instr` above pins this to a single `ldr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    // Unaligned 128-bit read of 16 polynomial bytes.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    // Unaligned 64-bit read of 4 x p16.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    // Unaligned 128-bit read of 8 x p16.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
// `p64` is tied to the polynomial-multiply extension, hence the extra `aes` feature.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    // Unaligned 64-bit read of 1 x p64.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    // Unaligned 128-bit read of 2 x p64.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    // Thin shim over the LLVM `ld1x2` intrinsic (consecutive loads into two registers).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0"
        )]
        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
    }
    _vld1_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    // Thin shim over the LLVM `ld1x3` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0"
        )]
        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
    }
    _vld1_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    // Thin shim over the LLVM `ld1x4` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0"
        )]
        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
    }
    _vld1_f64_x4(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    // 128-bit (`q`) variant: `v2f64` element type in the intrinsic name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0"
        )]
        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
    }
    _vld1q_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0"
        )]
        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
    }
    _vld1q_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0"
        )]
        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
    }
    _vld1q_f64_x4(a)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    // Shim over the LLVM `ld2r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    // Also the implementation behind the u64/p64 variants, which `transmute` into this.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
// With one-element vectors there is no de-interleaving to do, so no dedicated
// instruction is expected (`nop` assertion).
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v1f64.p0"
        )]
        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    _vld2_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // One-element vectors only have lane 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // One-element vectors only have lane 0. The u64/p64 variants below reuse this via
    // `transmute`.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    // Same bit pattern as the signed variant; only the element type differs.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Same bit pattern as the signed variant; only the element type differs.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian build: swap the two 64-bit lanes of each register to restore the
    // lane order the caller expects.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian build: swap the two 64-bit lanes of each register.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // Shim over the LLVM de-interleaving `ld2` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // Also the implementation behind the u64/p64 variants, which `transmute` into this.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // 2-lane vector: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // 16-lane vector: LANE must fit in 4 bits (0..=15). The u8/p8 variants reuse this
    // via `transmute`.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // 2-lane vector: LANE must fit in 1 bit. The u64/p64 variants reuse this.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    // Same bit pattern as the signed variant; only the element type differs.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
11917#[doc = "Load multiple 2-element structures to two registers"]
11918#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
11919#[doc = "## Safety"]
11920#[doc = "  * Neon instrinsic unsafe"]
11921#[inline]
11922#[cfg(target_endian = "little")]
11923#[target_feature(enable = "neon,aes")]
11924#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11925#[cfg_attr(test, assert_instr(ld2))]
11926pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
11927    transmute(vld2q_s64(transmute(a)))
11928}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // u64 and i64 share a bit pattern: reuse the signed load and
    // reinterpret the pointer and the returned register pair.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    // Binding to the LLVM `ld3r` (load-and-replicate) intrinsic; the
    // "unadjusted" ABI passes arguments to LLVM without ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    // Binding to the LLVM `ld3r` (load-and-replicate) intrinsic for the
    // 2 x f64 quad-register form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    // Binding to the LLVM `ld3r` (load-and-replicate) intrinsic for the
    // 2 x i64 quad-register form. Also reused by the u64/p64 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // Binding to the LLVM `ld3` structure-load intrinsic (1 x f64 per
    // register; compiles to plain loads, hence the `nop` assert above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v1f64.p0"
        )]
        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    _vld3_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Binding to the LLVM `ld3lane` intrinsic: replaces lane `n` of each of
    // the three input registers with data loaded from `ptr`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // poly64 and i64 share a bit pattern: defer to the signed implementation.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// NOTE(review): the doc summary above says "two registers", but this is an
// ld3 (three-register) load — looks like a generator/spec typo.
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Binding to the LLVM `ld3lane` intrinsic: replaces lane `n` of each of
    // the three input registers with data loaded from `ptr`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // u64 and i64 share a bit pattern: defer to the signed implementation.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // poly64 and i64 share a bit pattern: reuse the signed load-and-replicate
    // and reinterpret the pointer and the returned register triple.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // u64 and i64 share a bit pattern: reuse the signed load-and-replicate
    // and reinterpret the pointer and the returned register triple.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    // Binding to the LLVM `ld3` structure-load intrinsic for 2 x f64
    // quad registers; "unadjusted" keeps the ABI exactly as LLVM expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    // Binding to the LLVM `ld3` structure-load intrinsic for 2 x i64
    // quad registers. Also reused by the u64/p64 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // LANE selects one of the two 64-bit lanes (1 bit: 0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Binding to the LLVM `ld3lane` intrinsic: replaces lane `n` of each of
    // the three input registers with data loaded from `ptr`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    // LANE selects one of the two 64-bit lanes (1 bit: 0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and i64 share a bit pattern: defer to the signed implementation.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
12252#[doc = "Load multiple 3-element structures to two registers"]
12253#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12254#[doc = "## Safety"]
12255#[doc = "  * Neon instrinsic unsafe"]
12256#[inline]
12257#[target_feature(enable = "neon")]
12258#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12259#[rustc_legacy_const_generics(2)]
12260#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12261pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12262    static_assert_uimm_bits!(LANE, 3);
12263    unsafe extern "unadjusted" {
12264        #[cfg_attr(
12265            any(target_arch = "aarch64", target_arch = "arm64ec"),
12266            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12267        )]
12268        fn _vld3q_lane_s8(
12269            a: int8x16_t,
12270            b: int8x16_t,
12271            c: int8x16_t,
12272            n: i64,
12273            ptr: *const i8,
12274        ) -> int8x16x3_t;
12275    }
12276    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12277}
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// NOTE(review): the doc summary above says "two registers", but this is an
// ld3 (three-register) load — looks like a generator/spec typo.
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // LANE selects one of the two 64-bit lanes (1 bit: 0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Binding to the LLVM `ld3lane` intrinsic: replaces lane `n` of each of
    // the three input registers with data loaded from `ptr`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // LANE selects one of the sixteen 8-bit lanes (4 bits: 0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 share a bit pattern: defer to the signed implementation.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    // LANE selects one of the two 64-bit lanes (1 bit: 0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 share a bit pattern: defer to the signed implementation.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // LANE selects one of the sixteen 8-bit lanes (4 bits: 0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // poly8 and i8 share a bit pattern: defer to the signed implementation.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // poly64 and i64 share a bit pattern: reuse the signed load and
    // reinterpret the pointer and the returned register triple.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // u64 and i64 share a bit pattern: reuse the signed load and
    // reinterpret the pointer and the returned register triple.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    // Binding to the LLVM `ld4r` (load-and-replicate) intrinsic; the
    // "unadjusted" ABI passes arguments to LLVM without ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    // Binding to the LLVM `ld4r` (load-and-replicate) intrinsic for the
    // 2 x f64 quad-register form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    // Binding to the LLVM `ld4r` (load-and-replicate) intrinsic for the
    // 2 x i64 quad-register form. Also reused by the u64/p64 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // Binding to the LLVM `ld4` structure-load intrinsic (1 x f64 per
    // register; compiles to plain loads, hence the `nop` assert above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v1f64.p0"
        )]
        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    _vld4_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Binding to the LLVM `ld4lane` intrinsic: replaces lane `n` of each of
    // the four input registers with data loaded from `ptr`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Binding to the LLVM `ld4lane` intrinsic: replaces lane `n` of each of
    // the four input registers with data loaded from `ptr`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // poly64 and i64 share a bit pattern: defer to the signed implementation.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // u64 and i64 share a bit pattern: defer to the signed implementation.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // poly64 and i64 share a bit pattern: reuse the signed load-and-replicate
    // and reinterpret the pointer and the returned register quadruple.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // u64 and i64 share a bit pattern: reuse the signed load-and-replicate
    // and reinterpret the pointer and the returned register quadruple.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian variant: load via the signed implementation, then swap the
    // two 64-bit lanes of each register to restore big-endian lane numbering.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Direct binding to the AArch64 LD4 LLVM intrinsic for 2 x f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    // The element pointer is cast to the vector-pointer type the binding expects.
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Direct binding to the AArch64 LD4 LLVM intrinsic for 2 x i64 vectors.
    // The u64/p64 front-ends delegate to this function via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has 2 lanes, so LANE needs exactly 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    // Pass the four source registers individually; the lane index is widened
    // to i64 as required by the LLVM signature.
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12672#[doc = "Load multiple 4-element structures to four registers"]
12673#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12674#[doc = "## Safety"]
12675#[doc = "  * Neon instrinsic unsafe"]
12676#[inline]
12677#[target_feature(enable = "neon")]
12678#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12679#[rustc_legacy_const_generics(2)]
12680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12681pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12682    static_assert_uimm_bits!(LANE, 3);
12683    unsafe extern "unadjusted" {
12684        #[cfg_attr(
12685            any(target_arch = "aarch64", target_arch = "arm64ec"),
12686            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12687        )]
12688        fn _vld4q_lane_s8(
12689            a: int8x16_t,
12690            b: int8x16_t,
12691            c: int8x16_t,
12692            d: int8x16_t,
12693            n: i64,
12694            ptr: *const i8,
12695        ) -> int8x16x4_t;
12696    }
12697    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12698}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has 2 lanes, so LANE needs exactly 1 bit (0..=1). The u64/p64
    // lane variants delegate to this function via transmute.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // Lane range is re-asserted here so the error points at this intrinsic,
    // then the call is forwarded to the bit-identical signed variant.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // 16 lanes => 4-bit lane index (0..=15); delegates to the bit-identical
    // signed variant via transmute.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // 2 lanes => 1-bit lane index (0..=1); delegates to the bit-identical
    // signed variant via transmute.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // 16 lanes => 4-bit lane index (0..=15); poly8 shares the i8 bit layout,
    // so the call is forwarded to the signed variant via transmute.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // poly64 shares the i64 bit layout, so the signed loader's result is
    // reinterpreted in place (no lane reordering needed on LE).
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: reinterpret the signed loader's result, then swap
    // the two lanes of each of the four registers (big-endian lane fixup).
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // u64 shares the i64 bit layout; reinterpret the signed loader's result
    // directly (no lane reordering needed on LE).
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian variant: reinterpret the signed loader's result, then swap
    // the two lanes of each of the four registers (big-endian lane fixup).
    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // LANE selects which part of the 2-bit index vector is used; valid range
    // for this variant is 0..=1, enforced at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    // LANE is forwarded unchanged to the LLVM intrinsic.
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Quad-register (128-bit table) variant; LANE valid range is 0..=1.
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    // 16-bit-element variant; LANE valid range widens to 0..=3.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    // Quad-register, 16-bit-element variant; LANE valid range is 0..=3.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const LANE: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // Unsigned front-end: re-assert the lane range here, then reinterpret the
    // table through the bit-identical signed variant. Indices `b` pass through
    // untouched.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Unsigned front-end over the bit-identical signed quad variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // Unsigned 16-bit front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // Unsigned 16-bit quad front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const LANE: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // Polynomial front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Polynomial quad front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const LANE: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // Polynomial 16-bit front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const LANE: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // Polynomial 16-bit quad front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    // f16 front-end: the two-register table is reinterpreted as i16 vectors
    // (same bit width) and the work is done by the signed x2 variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    // Unsigned front-end over the bit-identical signed x2 variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    // Polynomial front-end over the bit-identical signed x2 variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
13059#[doc = "Lookup table read with 4-bit indices"]
13060#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13061#[doc = "## Safety"]
13062#[doc = "  * Neon instrinsic unsafe"]
13063#[inline]
13064#[target_feature(enable = "neon,lut")]
13065#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13066#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13067#[rustc_legacy_const_generics(2)]
13068pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13069    static_assert!(LANE >= 0 && LANE <= 1);
13070    unsafe extern "unadjusted" {
13071        #[cfg_attr(
13072            any(target_arch = "aarch64", target_arch = "arm64ec"),
13073            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13074        )]
13075        fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13076    }
13077    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13078}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // The 8-bit lane variant takes a 64-bit index vector, so only LANE == 0
    // is valid (compare the laneq variant, which allows 0..=1).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti4q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Unsigned front-end over the bit-identical signed variant; LANE must be 0.
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Polynomial front-end over the bit-identical signed variant; LANE must be 0.
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    // laneq variant takes a 128-bit index vector, widening LANE to 0..=3.
    // f16 tables are reinterpreted as i16 and handled by the signed variant.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    // Unsigned laneq front-end over the bit-identical signed x2 variant.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    // Polynomial laneq front-end over the bit-identical signed x2 variant.
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    // laneq variant takes a 128-bit index vector; LANE range is 0..=3.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    // The two-register table is split into its halves for the LLVM call.
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // 8-bit laneq variant: 128-bit index vector, LANE range 0..=1 (the plain
    // lane variant only allows LANE == 0).
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti4q_laneq_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Unsigned laneq front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon instrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // Polynomial laneq front-end over the bit-identical signed variant.
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to LLVM's aarch64 FMAX intrinsic for 1 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the target_feature gate guarantees the instruction is available.
    unsafe { _vmax_f64(a, b) }
}
13249#[doc = "Maximum (vector)"]
13250#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
13251#[inline]
13252#[target_feature(enable = "neon")]
13253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13254#[cfg_attr(test, assert_instr(fmax))]
13255pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13256    unsafe extern "unadjusted" {
13257        #[cfg_attr(
13258            any(target_arch = "aarch64", target_arch = "arm64ec"),
13259            link_name = "llvm.aarch64.neon.fmax.v2f64"
13260        )]
13261        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13262    }
13263    unsafe { _vmaxq_f64(a, b) }
13264}
13265#[doc = "Maximum (vector)"]
13266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
13267#[inline]
13268#[target_feature(enable = "neon,fp16")]
13269#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13270#[cfg(not(target_arch = "arm64ec"))]
13271#[cfg_attr(test, assert_instr(fmax))]
13272pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
13273    unsafe extern "unadjusted" {
13274        #[cfg_attr(
13275            any(target_arch = "aarch64", target_arch = "arm64ec"),
13276            link_name = "llvm.aarch64.neon.fmax.f16"
13277        )]
13278        fn _vmaxh_f16(a: f16, b: f16) -> f16;
13279    }
13280    unsafe { _vmaxh_f16(a, b) }
13281}
// "Maximum Number" (FMAXNM-style) variants, expressed through portable simd
// ops / std float methods; the `assert_instr` attributes check they lower to
// the expected `fmaxnm` instruction.
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // SAFETY: `simd_fmax` is a pure element-wise operation on the inputs.
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: `simd_fmax` is a pure element-wise operation on the inputs.
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar form uses the standard-library `f16::max`; the assert_instr
    // above checks it still lowers to a single `fmaxnm`.
    f16::max(a, b)
}
// Horizontal "maximum number" reductions. Per the `assert_instr` attributes,
// two-element vectors lower to a single pairwise `fmaxnmp`, wider vectors to
// the across-lanes `fmaxnmv`.
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_max(a) }
}
// Floating-point horizontal max reductions. Unlike the integer vmaxv family
// below, these go through LLVM's `llvm.aarch64.neon.fmaxv.*` intrinsics
// (bound via `extern "unadjusted"` + `link_name`) rather than a portable
// simd reduction.
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: pure reduction; required target features enabled above.
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: pure reduction; required target features enabled above.
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: pure reduction; required target feature enabled above.
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: pure reduction; required target feature enabled above.
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: pure reduction; required target feature enabled above.
    unsafe { _vmaxvq_f64(a) }
}
// Integer horizontal max reductions. Each body is a pure `simd_reduce_max`
// over the lanes (SAFETY: no preconditions beyond the enabled `neon`
// feature). Per the `assert_instr` attributes, two-element vectors lower to
// a single pairwise SMAXP/UMAXP; wider vectors to across-lanes SMAXV/UMAXV.
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
// Element-wise floating-point min: direct bindings to LLVM's
// `llvm.aarch64.neon.fmin.*` intrinsics (mirrors the vmax family above).
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure value computation; required target feature enabled above.
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure value computation; required target feature enabled above.
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 variant; compiled out entirely on arm64ec (see `cfg` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: pure value computation; required target features enabled above.
    unsafe { _vminh_f16(a, b) }
}
// "Minimum Number" (FMINNM-style) variants, expressed through portable simd
// ops / std float methods; the `assert_instr` attributes check they lower to
// the expected `fminnm` instruction.
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // SAFETY: `simd_fmin` is a pure element-wise operation on the inputs.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: `simd_fmin` is a pure element-wise operation on the inputs.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar form uses the standard-library `f16::min`; the assert_instr
    // above checks it still lowers to a single `fminnm`.
    f16::min(a, b)
}
// Horizontal "minimum number" reductions. Per the `assert_instr` attributes,
// two-element vectors lower to a single pairwise `fminnmp`, wider vectors to
// the across-lanes `fminnmv`.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    // SAFETY: pure reduction over the vector's lanes.
    unsafe { simd_reduce_min(a) }
}
// Floating-point horizontal min reductions. Like the float vmaxv family,
// these go through LLVM's `llvm.aarch64.neon.fminv.*` intrinsics (bound via
// `extern "unadjusted"` + `link_name`) rather than a portable simd reduction.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: pure reduction; required target features enabled above.
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: pure reduction; required target features enabled above.
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: pure reduction; required target feature enabled above.
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: pure reduction; required target feature enabled above.
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: pure reduction; required target feature enabled above.
    unsafe { _vminvq_f64(a) }
}
// Integer horizontal min reductions. Each body is a pure `simd_reduce_min`
// over the lanes (SAFETY: no preconditions beyond the enabled `neon`
// feature). Per the `assert_instr` attributes, two-element vectors lower to
// a single pairwise SMINP/UMINP; wider vectors to across-lanes SMINV/UMINV.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
// Multiply-add to accumulator: a + (b * c), computed as a separate multiply
// followed by an add (NOT a fused multiply-add) — the `assert_instr(fmul)`
// attributes reflect the unfused lowering.
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // SAFETY: pure element-wise arithmetic on the inputs.
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // SAFETY: pure element-wise arithmetic on the inputs.
    unsafe { simd_add(a, simd_mul(b, c)) }
}
// Widening multiply-add by a single lane of `c`: broadcast lane LANE across
// all eight i16 lanes with simd_shuffle!, then delegate to the non-lane
// vmlal_high_s16. `static_assert_uimm_bits!` bounds LANE at compile time
// (2 bits for a 4-lane `c`, 3 bits for an 8-lane `c`).
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // LANE must fit in 2 bits (0..=3): `c` has four lanes.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are compile-time checked above; the rest
    // is a pure delegation to vmlal_high_s16.
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // LANE must fit in 3 bits (0..=7): `c` has eight lanes here.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the shuffle indices are compile-time checked above; the rest
    // is a pure delegation to vmlal_high_s16.
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
// 32-bit counterparts of the lane-selecting widening multiply-adds above:
// broadcast lane LANE of `c` to all four i32 lanes, then delegate to
// vmlal_high_s32.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // LANE must fit in 1 bit (0..=1): `c` has two lanes.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices compile-time checked; pure delegation.
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // LANE must fit in 2 bits (0..=3): `c` has four lanes here.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices compile-time checked; pure delegation.
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // `c` is a 64-bit vector with 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` to an 8-lane vector (the width the vector
        // form expects), then delegate to the unsigned high-half
        // multiply-accumulate (UMLAL2).
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // `c` is a 128-bit vector with 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` across all 8 lanes, then delegate to the
        // vector form of the high-half multiply-accumulate.
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // `c` is a 64-bit vector with 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `c` to a 4-lane vector, then delegate to the
        // vector form of the high-half multiply-accumulate.
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // `c` is a 128-bit vector with 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then delegate to the
        // vector form of the high-half multiply-accumulate.
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..=15) of `b` and `c`, then perform
        // the widening multiply-accumulate on them; LLVM folds this into SMLAL2.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..=7) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-accumulate.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..=3) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-accumulate.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..=15) of `b` and `c`, then perform
        // the widening multiply-accumulate on them; LLVM folds this into UMLAL2.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..=7) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-accumulate.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..=3) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-accumulate.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // a - (b * c), computed as separate multiply and subtract (not fused);
    // hence the assert_instr above checks for FMUL, not a fused instruction.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // a - (b * c), computed as separate multiply and subtract (not fused).
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // `c` is a 64-bit vector with 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` to an 8-lane vector (the width the vector
        // form expects), then delegate to the high-half multiply-subtract (SMLSL2).
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // `c` is a 128-bit vector with 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` across all 8 lanes, then delegate to the
        // vector form of the high-half multiply-subtract.
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // `c` is a 64-bit vector with 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `c` to a 4-lane vector, then delegate to the
        // vector form of the high-half multiply-subtract.
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // `c` is a 128-bit vector with 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then delegate to the
        // vector form of the high-half multiply-subtract.
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // `c` is a 64-bit vector with 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` to an 8-lane vector, then delegate to the
        // unsigned high-half multiply-subtract (UMLSL2).
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // `c` is a 128-bit vector with 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` across all 8 lanes, then delegate to the
        // vector form of the high-half multiply-subtract.
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // `c` is a 64-bit vector with 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `c` to a 4-lane vector, then delegate to the
        // vector form of the high-half multiply-subtract.
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // `c` is a 128-bit vector with 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then delegate to the
        // vector form of the high-half multiply-subtract.
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Splat the scalar `c` into a full vector and reuse the vector form.
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..=15) of `b` and `c`, then perform
        // the widening multiply-subtract on them; LLVM folds this into SMLSL2.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..=7) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-subtract.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_s16(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..=3) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-subtract.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..=15) of `b` and `c`, then perform
        // the widening multiply-subtract on them; LLVM folds this into UMLSL2.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..=7) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-subtract.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..=3) of `b` and `c`, then delegate
        // to the 64-bit widening multiply-subtract.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_u32(a, b, c)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high half (lanes 8..=15), then sign-extend each element
        // to the next wider type; LLVM folds this into SXTL2.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the high half (lanes 4..=7), then widen via vmovl_s16.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_s16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the high half (lanes 2..=3), then widen via vmovl_s32.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_s32(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the high half (lanes 8..=15), then zero-extend each element
        // to the next wider type; LLVM folds this into UXTL2.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_u8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the high half (lanes 4..=7), then widen via vmovl_u16.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_u16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the high half (lanes 2..=3), then widen via vmovl_u32.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_u32(a)
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // Truncate each lane of `b` to the narrower element type, then
        // concatenate `a` (low half) with the narrowed `b` (high half);
        // LLVM folds this into XTN2.
        let c: int8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe {
        // Narrow `b`, then concatenate `a` (low half) with it (high half).
        let c: int16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe {
        // Narrow `b`, then concatenate `a` (low half) with it (high half).
        let c: int32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        // Narrow `b`, then concatenate `a` (low half) with it (high half).
        let c: uint8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe {
        // Narrow `b`, then concatenate `a` (low half) with it (high half).
        let c: uint16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe {
        // Narrow `b`, then concatenate `a` (low half) with it (high half).
        let c: uint32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point multiply on 64-bit vectors (one f64 lane).
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point multiply on 128-bit vectors (two f64 lanes).
    unsafe { simd_mul(a, b) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` has a single lane, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // Extract that lane as an f64 and rebuild a one-lane vector to multiply by.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of the 8-lane `b` into all 4 result lanes,
        // then multiply element-wise with `a`.
        simd_mul(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes, then multiply
        // element-wise with `a`.
        simd_mul(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract lane LANE of `b`, rebuild a one-lane vector from the scalar,
    // and multiply element-wise with `a`.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    // Splat the scalar `b` into a one-lane vector and multiply element-wise.
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    // Splat the scalar `b` into both lanes and multiply element-wise.
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // Scalar-by-lane form: `b` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Pull the selected lane out of `b` and multiply the two scalars.
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
14812#[doc = "Add"]
14813#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
14814#[inline]
14815#[target_feature(enable = "neon,fp16")]
14816#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14817#[cfg(not(target_arch = "arm64ec"))]
14818#[cfg_attr(test, assert_instr(nop))]
14819pub fn vmulh_f16(a: f16, b: f16) -> f16 {
14820    a * b
14821}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the selected lane of `b` and multiply the two f16 scalars.
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Extract the selected lane of `b` and multiply the two f16 scalars.
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` to an 8-lane vector, then widening-multiply
        // it with the high half of `a` (vmull_high_s16 uses lanes 4..=7).
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` to all 8 lanes, then widening-multiply
        // with the high half of `a`.
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `b` to a 4-lane vector, then widening-multiply
        // with the high half of `a` (lanes 2 and 3).
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` to all 4 lanes, then widening-multiply
        // with the high half of `a`.
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` to an 8-lane vector, then unsigned
        // widening-multiply with the high half of `a` (lanes 4..=7).
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` to all 8 lanes, then unsigned
        // widening-multiply with the high half of `a`.
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `b` to a 4-lane vector, then unsigned
        // widening-multiply with the high half of `a` (lanes 2 and 3).
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` to all 4 lanes, then unsigned
        // widening-multiply with the high half of `a`.
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Splat the scalar `b` to all lanes and widening-multiply with the
    // high half of `a`.
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Splat the scalar `b` to all lanes and widening-multiply with the
    // high half of `a`.
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    // Splat the scalar `b` to all lanes and unsigned widening-multiply
    // with the high half of `a`.
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    // Splat the scalar `b` to all lanes and unsigned widening-multiply
    // with the high half of `a`.
    vmull_high_u32(a, vdupq_n_u32(b))
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    // Take the high (index 1) p64 lane of each input and delegate to the
    // 64x64 -> 128-bit carry-less polynomial multiply.
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs, then delegate
        // to the 64-bit-wide polynomial widening multiply.
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs, then delegate
        // to the signed widening multiply (i8 x i8 -> i16 per lane).
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the high halves (lanes 4..=7) of both inputs, then delegate
        // to the signed widening multiply (i16 x i16 -> i32 per lane).
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the high halves (lanes 2 and 3) of both inputs, then delegate
        // to the signed widening multiply (i32 x i32 -> i64 per lane).
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs, then delegate
        // to the unsigned widening multiply (u8 x u8 -> u16 per lane).
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the high halves (lanes 4..=7) of both inputs, then delegate
        // to the unsigned widening multiply (u16 x u16 -> u32 per lane).
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the high halves (lanes 2 and 3) of both inputs, then delegate
        // to the unsigned widening multiply (u32 x u32 -> u64 per lane).
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    // Binds the LLVM PMULL intrinsic (64x64 -> 128-bit carry-less multiply).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    // The 128-bit result comes back as int8x16_t and is reinterpreted as p128.
    unsafe { transmute(_vmull_p64(a, b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` is a single-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast the selected lane of `b` into both result lanes and multiply.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `b` into both result lanes and multiply.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected lane of `b` and multiply the two f32 scalars.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the selected lane of `b` and multiply the two f32 scalars.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected lane of `b` and multiply the two f64 scalars.
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Binds the LLVM FMULX intrinsic for 4-lane f16 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Binds the LLVM FMULX intrinsic for 8-lane f16 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Binds the LLVM FMULX intrinsic for 2-lane f32 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Binds the LLVM FMULX intrinsic for 4-lane f32 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Binds the LLVM FMULX intrinsic for 1-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Binds the LLVM FMULX intrinsic for 2-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` into all 4 lanes and delegate to vmulx_f16.
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of the 8-lane `b` into a 4-lane vector and
        // delegate to vmulx_f16.
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of the 4-lane `b` into an 8-lane vector and
        // delegate to vmulxq_f16.
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes and delegate to
        // vmulxq_f16.
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` into both lanes and delegate to vmulx_f32.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of the 4-lane `b` into a 2-lane vector and
    // delegate to vmulx_f32.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // LANE must fit in 1 bit: valid lanes of a float32x2_t are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmulxq_f32(
            a,
            // Broadcast lane LANE of the double `b` across all four quad lanes.
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes of a float32x4_t are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmulxq_f32(
            a,
            // Broadcast lane LANE of `b` across all four lanes before the FMULX.
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit: valid lanes of a float64x2_t are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` to both elements, then FMULX with `a`.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // A float64x1_t has a single lane, so the only valid LANE is 0.
    static_assert!(LANE == 0);
    // Extract the scalar lane and re-wrap it as a 1-element vector for vmulx_f64.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE must fit in 1 bit: valid lanes of a float64x2_t are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane of the quad `b` and re-wrap it as a 1-element vector.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    // Duplicate the scalar `b` into every lane, then multiply-extended with `a`.
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    // Duplicate the scalar `b` into all eight lanes, then multiply-extended with `a`.
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    // Raw binding to the scalar LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    // Raw binding to the scalar LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // A float64x1_t has a single lane, so the only valid LANE is 0.
    static_assert!(LANE == 0);
    // Extract the lane as a scalar and use the scalar FMULX.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE must fit in 1 bit: valid lanes of a float64x2_t are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane as a scalar and use the scalar FMULX.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE must fit in 1 bit: valid lanes of a float32x2_t are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane as a scalar and use the scalar FMULX.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE must fit in 2 bits: valid lanes of a float32x4_t are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane as a scalar and use the scalar FMULX.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    // Raw binding to the scalar half-precision LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // LANE must fit in 2 bits: valid lanes of a float16x4_t are 0..=3.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane as a scalar and use the scalar FMULX.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // LANE must fit in 3 bits: valid lanes of a float16x8_t are 0..=7.
    static_assert_uimm_bits!(LANE, 3);
    // Extract the chosen lane as a scalar and use the scalar FMULX.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // A float64x1_t has a single lane, so the only valid LANE is 0.
    static_assert!(LANE == 0);
    // Broadcast the single lane of `b` to both quad lanes, then FMULX with `a`.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point negation via the generic SIMD negate.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point negation via the generic SIMD negate.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    // Lane-wise integer negation via the generic SIMD negate.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    // Lane-wise integer negation via the generic SIMD negate.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegd_s64(a: i64) -> i64 {
    // Wrapping negate so i64::MIN maps to itself (matching the NEG instruction)
    // instead of panicking on overflow in debug builds.
    a.wrapping_neg()
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    // Plain scalar negation; lowers to FNEG per the assertion above.
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    // Sum the two lanes of the input vector into a scalar.
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    // Sum the two lanes of the input vector into a scalar.
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    // Horizontal add of both lanes; order is irrelevant for integer addition.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    // Horizontal add of both lanes; order is irrelevant for integer addition.
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw binding to the LLVM pairwise-add intrinsic (FADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v8f16"
        )]
        fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpaddq_f16(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to the LLVM pairwise-add intrinsic (FADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v4f32"
        )]
        fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpaddq_f32(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to the LLVM pairwise-add intrinsic (FADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v2f64"
        )]
        fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpaddq_f64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Raw binding to the LLVM integer pairwise-add intrinsic (ADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v16i8"
        )]
        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpaddq_s8(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Raw binding to the LLVM integer pairwise-add intrinsic (ADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v8i16"
        )]
        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpaddq_s16(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Raw binding to the LLVM integer pairwise-add intrinsic (ADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v4i32"
        )]
        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpaddq_s32(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Raw binding to the LLVM integer pairwise-add intrinsic (ADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v2i64"
        )]
        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
    }
    unsafe { _vpaddq_s64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Bit-pattern identical to the signed variant, so reuse it via transmute.
    unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian: lane order is reversed relative to the register layout the
    // transmute-based implementation assumes, so reverse lanes before the
    // signed pairwise add and reverse the result back.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Bit-pattern identical to the signed variant, so reuse it via transmute.
    unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Big-endian: reverse lanes around the signed pairwise add so the
    // transmutes see the expected register layout.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Bit-pattern identical to the signed variant, so reuse it via transmute.
    unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Big-endian: reverse lanes around the signed pairwise add so the
    // transmutes see the expected register layout.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Bit-pattern identical to the signed variant, so reuse it via transmute.
    unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Big-endian: reverse lanes around the signed pairwise add so the
    // transmutes see the expected register layout.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
15993#[doc = "Floating-point add pairwise"]
15994#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
15995#[inline]
15996#[target_feature(enable = "neon,fp16")]
15997#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15998#[cfg(not(target_arch = "arm64ec"))]
15999#[cfg_attr(test, assert_instr(fmaxp))]
16000pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16001    unsafe extern "unadjusted" {
16002        #[cfg_attr(
16003            any(target_arch = "aarch64", target_arch = "arm64ec"),
16004            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
16005        )]
16006        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16007    }
16008    unsafe { _vpmax_f16(a, b) }
16009}
16010#[doc = "Floating-point add pairwise"]
16011#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
16012#[inline]
16013#[target_feature(enable = "neon,fp16")]
16014#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16015#[cfg(not(target_arch = "arm64ec"))]
16016#[cfg_attr(test, assert_instr(fmaxp))]
16017pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16018    unsafe extern "unadjusted" {
16019        #[cfg_attr(
16020            any(target_arch = "aarch64", target_arch = "arm64ec"),
16021            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
16022        )]
16023        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16024    }
16025    unsafe { _vpmaxq_f16(a, b) }
16026}
16027#[doc = "Floating-point add pairwise"]
16028#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
16029#[inline]
16030#[target_feature(enable = "neon,fp16")]
16031#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16032#[cfg(not(target_arch = "arm64ec"))]
16033#[cfg_attr(test, assert_instr(fmaxnmp))]
16034pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16035    unsafe extern "unadjusted" {
16036        #[cfg_attr(
16037            any(target_arch = "aarch64", target_arch = "arm64ec"),
16038            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
16039        )]
16040        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16041    }
16042    unsafe { _vpmaxnm_f16(a, b) }
16043}
16044#[doc = "Floating-point add pairwise"]
16045#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
16046#[inline]
16047#[target_feature(enable = "neon,fp16")]
16048#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16049#[cfg(not(target_arch = "arm64ec"))]
16050#[cfg_attr(test, assert_instr(fmaxnmp))]
16051pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16052    unsafe extern "unadjusted" {
16053        #[cfg_attr(
16054            any(target_arch = "aarch64", target_arch = "arm64ec"),
16055            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
16056        )]
16057        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16058    }
16059    unsafe { _vpmaxnmq_f16(a, b) }
16060}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw binding to the LLVM pairwise maxNum intrinsic (FMAXNMP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to the LLVM pairwise maxNum intrinsic (FMAXNMP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to the LLVM pairwise maxNum intrinsic (FMAXNMP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    // Raw binding to the LLVM across-vector maxNum reduction (fmaxnmv).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    // Raw binding to the LLVM across-vector maxNum reduction (fmaxnmv).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxnms_f32(a) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to the LLVM pairwise maximum intrinsic (FMAXP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to LLVM's pairwise-maximum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Raw binding to LLVM's signed pairwise-maximum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Raw binding to LLVM's signed pairwise-maximum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Raw binding to LLVM's signed pairwise-maximum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Raw binding to LLVM's unsigned pairwise-maximum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Raw binding to LLVM's unsigned pairwise-maximum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Raw binding to LLVM's unsigned pairwise-maximum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    // Raw binding to LLVM's `fmaxv` reduction intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    // Raw binding to LLVM's `fmaxv` reduction intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmaxs_f32(a) }
}
16301#[doc = "Floating-point add pairwise"]
16302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
16303#[inline]
16304#[target_feature(enable = "neon,fp16")]
16305#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16306#[cfg(not(target_arch = "arm64ec"))]
16307#[cfg_attr(test, assert_instr(fminp))]
16308pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16309    unsafe extern "unadjusted" {
16310        #[cfg_attr(
16311            any(target_arch = "aarch64", target_arch = "arm64ec"),
16312            link_name = "llvm.aarch64.neon.fminp.v4f16"
16313        )]
16314        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16315    }
16316    unsafe { _vpmin_f16(a, b) }
16317}
16318#[doc = "Floating-point add pairwise"]
16319#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
16320#[inline]
16321#[target_feature(enable = "neon,fp16")]
16322#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16323#[cfg(not(target_arch = "arm64ec"))]
16324#[cfg_attr(test, assert_instr(fminp))]
16325pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16326    unsafe extern "unadjusted" {
16327        #[cfg_attr(
16328            any(target_arch = "aarch64", target_arch = "arm64ec"),
16329            link_name = "llvm.aarch64.neon.fminp.v8f16"
16330        )]
16331        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16332    }
16333    unsafe { _vpminq_f16(a, b) }
16334}
16335#[doc = "Floating-point add pairwise"]
16336#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
16337#[inline]
16338#[target_feature(enable = "neon,fp16")]
16339#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16340#[cfg(not(target_arch = "arm64ec"))]
16341#[cfg_attr(test, assert_instr(fminnmp))]
16342pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16343    unsafe extern "unadjusted" {
16344        #[cfg_attr(
16345            any(target_arch = "aarch64", target_arch = "arm64ec"),
16346            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
16347        )]
16348        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16349    }
16350    unsafe { _vpminnm_f16(a, b) }
16351}
16352#[doc = "Floating-point add pairwise"]
16353#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
16354#[inline]
16355#[target_feature(enable = "neon,fp16")]
16356#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16357#[cfg(not(target_arch = "arm64ec"))]
16358#[cfg_attr(test, assert_instr(fminnmp))]
16359pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16360    unsafe extern "unadjusted" {
16361        #[cfg_attr(
16362            any(target_arch = "aarch64", target_arch = "arm64ec"),
16363            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
16364        )]
16365        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16366    }
16367    unsafe { _vpminnmq_f16(a, b) }
16368}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw binding to LLVM's pairwise minimum-number intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to LLVM's pairwise minimum-number intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to LLVM's pairwise minimum-number intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    // Raw binding to LLVM's `fminnmv` reduction intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    // Raw binding to LLVM's `fminnmv` reduction intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to LLVM's pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to LLVM's pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Raw binding to LLVM's signed pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Raw binding to LLVM's signed pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Raw binding to LLVM's signed pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Raw binding to LLVM's unsigned pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Raw binding to LLVM's unsigned pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Raw binding to LLVM's unsigned pairwise-minimum intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    // Raw binding to LLVM's `fminv` reduction intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    // Raw binding to LLVM's `fminv` reduction intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vpmins_f32(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    // Raw binding to LLVM's signed saturating-abs intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    // Raw binding to LLVM's signed saturating-abs intrinsic (see `link_name`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqabsq_s64(a) }
}
16641#[doc = "Signed saturating absolute value"]
16642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
16643#[inline]
16644#[target_feature(enable = "neon")]
16645#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16646#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16647pub fn vqabsb_s8(a: i8) -> i8 {
16648    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
16649}
16650#[doc = "Signed saturating absolute value"]
16651#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
16652#[inline]
16653#[target_feature(enable = "neon")]
16654#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16655#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16656pub fn vqabsh_s16(a: i16) -> i16 {
16657    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
16658}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    // Raw binding to the scalar form of LLVM's saturating-abs intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    // Raw binding to the scalar form of LLVM's saturating-abs intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqabsd_s64(a) }
}
16691#[doc = "Saturating add"]
16692#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
16693#[inline]
16694#[target_feature(enable = "neon")]
16695#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16696#[cfg_attr(test, assert_instr(sqadd))]
16697pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
16698    let a: int8x8_t = vdup_n_s8(a);
16699    let b: int8x8_t = vdup_n_s8(b);
16700    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
16701}
16702#[doc = "Saturating add"]
16703#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
16704#[inline]
16705#[target_feature(enable = "neon")]
16706#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16707#[cfg_attr(test, assert_instr(sqadd))]
16708pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
16709    let a: int16x4_t = vdup_n_s16(a);
16710    let b: int16x4_t = vdup_n_s16(b);
16711    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
16712}
16713#[doc = "Saturating add"]
16714#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
16715#[inline]
16716#[target_feature(enable = "neon")]
16717#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16718#[cfg_attr(test, assert_instr(uqadd))]
16719pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
16720    let a: uint8x8_t = vdup_n_u8(a);
16721    let b: uint8x8_t = vdup_n_u8(b);
16722    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
16723}
16724#[doc = "Saturating add"]
16725#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
16726#[inline]
16727#[target_feature(enable = "neon")]
16728#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16729#[cfg_attr(test, assert_instr(uqadd))]
16730pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
16731    let a: uint16x4_t = vdup_n_u16(a);
16732    let b: uint16x4_t = vdup_n_u16(b);
16733    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
16734}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Raw binding to the scalar form of LLVM's signed saturating-add intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Raw binding to the scalar form of LLVM's signed saturating-add intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Raw binding to the scalar form of LLVM's unsigned saturating-add intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Raw binding to the scalar form of LLVM's unsigned saturating-add intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: by-value call with no pointer args; `#[target_feature]` above
    // guarantees the required CPU feature is enabled.
    unsafe { _vqaddd_u64(a, b) }
}
16799#[doc = "Signed saturating doubling multiply-add long"]
16800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
16801#[inline]
16802#[target_feature(enable = "neon")]
16803#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
16804#[rustc_legacy_const_generics(3)]
16805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16806pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
16807    static_assert_uimm_bits!(N, 2);
16808    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
16809}
16810#[doc = "Signed saturating doubling multiply-add long"]
16811#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
16812#[inline]
16813#[target_feature(enable = "neon")]
16814#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
16815#[rustc_legacy_const_generics(3)]
16816#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16817pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
16818    static_assert_uimm_bits!(N, 3);
16819    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
16820}
16821#[doc = "Signed saturating doubling multiply-add long"]
16822#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
16823#[inline]
16824#[target_feature(enable = "neon")]
16825#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
16826#[rustc_legacy_const_generics(3)]
16827#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16828pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
16829    static_assert_uimm_bits!(N, 1);
16830    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
16831}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c` (2 bits); the doubled widening
    // product of `b`'s high half and that lane is saturating-added into `a`.
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Multiply `b`'s high half by scalar `c` (doubled, widening), then
    // saturating-add the int32x4 product into `a`.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubled widening product of the high halves of `b` and `c`,
    // saturating-added into `a`.
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Multiply `b`'s high half by scalar `c` (doubled, widening), then
    // saturating-add the int64x2 product into `a`.
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubled widening product of the high halves of `b` and `c`,
    // saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N selects a lane of the 8-lane `c` (3 bits); the doubled widening
    // product of `b` and that lane is saturating-added into `a`.
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c` (2 bits); the doubled widening
    // product of `b` and that lane is saturating-added into `a`.
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // Scalar form: extract lane LANE of `c` (0..=3) and defer to the
    // all-scalar vqdmlalh_s16.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // Scalar form over a 128-bit vector: extract lane LANE of `c` (0..=7)
    // and defer to the all-scalar vqdmlalh_s16.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // Scalar form: extract lane LANE of `c` (0..=1) and defer to the
    // all-scalar vqdmlals_s32.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // Scalar form over a 128-bit vector: extract lane LANE of `c` (0..=3)
    // and defer to the all-scalar vqdmlals_s32.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // No scalar i16 doubling-multiply intrinsic exists, so broadcast `b` and
    // `c` into vectors, take lane 0 of the widening product, and
    // saturating-add it to `a`.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
16955#[doc = "Signed saturating doubling multiply-add long"]
16956#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
16957#[inline]
16958#[target_feature(enable = "neon")]
16959#[cfg_attr(test, assert_instr(sqdmlal))]
16960#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16961pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
16962    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
16963    x
16964}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N selects a lane of the 4-lane `c` (2 bits); the doubled widening
    // product of `b`'s high half and that lane is saturating-subtracted from `a`.
    static_assert_uimm_bits!(N, 2);
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N selects a lane of the 8-lane `c` (3 bits); the doubled widening
    // product of `b`'s high half and that lane is saturating-subtracted from `a`.
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N selects a lane of the 2-lane `c` (1 bit); the doubled widening
    // product of `b`'s high half and that lane is saturating-subtracted from `a`.
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c` (2 bits); the doubled widening
    // product of `b`'s high half and that lane is saturating-subtracted from `a`.
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Multiply `b`'s high half by scalar `c` (doubled, widening), then
    // saturating-subtract the int32x4 product from `a`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubled widening product of the high halves of `b` and `c`,
    // saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Multiply `b`'s high half by scalar `c` (doubled, widening), then
    // saturating-subtract the int64x2 product from `a`.
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubled widening product of the high halves of `b` and `c`,
    // saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N selects a lane of the 8-lane `c` (3 bits); the doubled widening
    // product of `b` and that lane is saturating-subtracted from `a`.
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c` (2 bits); the doubled widening
    // product of `b` and that lane is saturating-subtracted from `a`.
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // Scalar form: extract lane LANE of `c` (0..=3) and defer to the
    // all-scalar vqdmlslh_s16.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // Scalar form over a 128-bit vector: extract lane LANE of `c` (0..=7)
    // and defer to the all-scalar vqdmlslh_s16.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // Scalar form: extract lane LANE of `c` (0..=1) and defer to the
    // all-scalar vqdmlsls_s32.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // Scalar form over a 128-bit vector: extract lane LANE of `c` (0..=3)
    // and defer to the all-scalar vqdmlsls_s32.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // No scalar i16 doubling-multiply intrinsic exists, so broadcast `b` and
    // `c` into vectors, take lane 0 of the widening product, and
    // saturating-subtract it from `a`.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
17121#[doc = "Signed saturating doubling multiply-subtract long"]
17122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
17123#[inline]
17124#[target_feature(enable = "neon")]
17125#[cfg_attr(test, assert_instr(sqdmlsl))]
17126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17127pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
17128    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
17129    x
17130}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Broadcast lane LANE of `b` (0..=3) across a vector, then take the high
    // half of the saturating doubled product with `a`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // 128-bit variant: broadcast lane LANE of `b` (0..=3) across 8 lanes,
    // then take the high half of the saturating doubled product with `a`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Broadcast lane LANE of `b` (0..=1) across a vector, then take the high
    // half of the saturating doubled product with `a`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // 128-bit variant: broadcast lane LANE of `b` (0..=1) across 4 lanes,
    // then take the high half of the saturating doubled product with `a`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    // Scalar form: extract lane N of `b` (0..=3), then defer to the
    // all-scalar vqdmulhh_s16.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    // Scalar form over a 128-bit vector: extract lane N of `b` (0..=7), then
    // defer to the all-scalar vqdmulhh_s16.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // No scalar i16 intrinsic exists: broadcast both operands into vectors,
    // run the vector sqdmulh, and return lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // No scalar i32 intrinsic exists: broadcast both operands into vectors,
    // run the vector sqdmulh, and return lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    // Scalar form: extract lane N of `b` (0..=1), then defer to the
    // all-scalar vqdmulhs_s32.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    // Scalar form over a 128-bit vector: extract lane N of `b` (0..=3), then
    // defer to the all-scalar vqdmulhs_s32.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // Take the upper four lanes of `a` and splat lane N of `b` (0..=3), then
    // form the doubled widening product.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // Take the upper two lanes of `a` and splat lane N of `b` (0..=3), then
    // form the doubled widening product.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // Take the upper two lanes of `a` and splat lane N of `b` (0..=1), then
    // form the doubled widening product.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // Take the upper four lanes of `a` and splat lane N of `b` (0..=7), then
    // form the doubled widening product.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Take the upper four lanes of `a` and broadcast scalar `b`, then form
    // the doubled widening product.
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Take the upper two lanes of `a` and broadcast scalar `b`, then form
    // the doubled widening product.
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // Take the upper four lanes of both operands, then form the doubled
    // widening product.
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // Take the upper two lanes of both operands, then form the doubled
    // widening product.
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // Splat lane N of the 8-lane `b` (3 bits) and form the doubled widening
    // product with `a`.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // Splat lane N of the 4-lane `b` (2 bits) and form the doubled widening
    // product with `a`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // Scalar form: extract lane N of `b` (0..=3), then defer to the
    // all-scalar vqdmullh_s16.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // Scalar form over a 128-bit vector: extract lane N of `b` (0..=3), then
    // defer to the all-scalar vqdmulls_s32.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // Scalar form over a 128-bit vector: extract lane N of `b` (0..=7), then
    // defer to the all-scalar vqdmullh_s16.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // No scalar i16 intrinsic exists: broadcast both operands into vectors,
    // run the vector sqdmull, and return lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // Scalar form: extract lane N of `b` (0..=1), then defer to the
    // all-scalar vqdmulls_s32.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // Direct binding to the scalar LLVM intrinsic; the "unadjusted" ABI keeps
    // the argument/return lowering exactly as LLVM declares it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    // SAFETY: callers must have the `neon` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqdmulls_s32(a, b) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // Narrow `b`, then concatenate: result lanes 0..=7 come from `a`,
        // lanes 8..=15 from the narrowed `b` (the "2"/high form of sqxtn).
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Narrow `b` and concatenate: lanes 0..=3 from `a`, 4..=7 from narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Narrow `b` and concatenate: lanes 0..=1 from `a`, 2..=3 from narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        // Narrow `b`, then concatenate: result lanes 0..=7 come from `a`,
        // lanes 8..=15 from the narrowed `b` (the "2"/high form of uqxtn).
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Narrow `b` and concatenate: lanes 0..=3 from `a`, 4..=7 from narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Narrow `b` and concatenate: lanes 0..=1 from `a`, 2..=3 from narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    // Direct binding to the scalar LLVM intrinsic; the "unadjusted" ABI keeps
    // the argument/return lowering exactly as LLVM declares it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    // SAFETY: callers must have the `neon` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    // Direct binding to the scalar LLVM intrinsic; the "unadjusted" ABI keeps
    // the argument/return lowering exactly as LLVM declares it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    // SAFETY: callers must have the `neon` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    // Broadcast the scalar, run the vector narrowing op, take lane 0.
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    // Broadcast the scalar, run the vector narrowing op, take lane 0.
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    // Broadcast the scalar, run the vector narrowing op, take lane 0.
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    // Broadcast the scalar, run the vector narrowing op, take lane 0.
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    unsafe {
        // Narrow `b` to unsigned, then concatenate: result lanes 0..=7 come
        // from `a`, lanes 8..=15 from the narrowed `b` (high form of sqxtun).
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Narrow `b` to unsigned and concatenate: lanes 0..=3 from `a`, 4..=7 from narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Narrow `b` to unsigned and concatenate: lanes 0..=1 from `a`, 2..=3 from narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    // Broadcast the scalar, run the vector narrowing op, take lane 0.
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    // Broadcast the scalar, run the vector narrowing op, take lane 0.
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    // Broadcast the scalar, run the vector narrowing op, take lane 0.
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Direct binding to the 1x64-bit vector LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: callers must have the `neon` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Direct binding to the 2x64-bit vector LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: callers must have the `neon` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    // Broadcast the scalar, negate the vector with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    // Broadcast the scalar, negate the vector with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    // Broadcast the scalar, negate the vector with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    // Broadcast the scalar, negate the vector with saturation, take lane 0.
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for a 4-lane vector).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all four lanes, then accumulate.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Compile-time check: lane index must fit in 1 bit (0..=1 for a 2-lane vector).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `c` across both lanes, then accumulate.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // Compile-time check: lane index must fit in 3 bits (0..=7 for the 8-lane `c`).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of the 8-lane `c` into a 4-lane vector, then accumulate.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for the 4-lane `c`).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of the 4-lane `c` into a 2-lane vector, then accumulate.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for the 4-lane `c`).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of the 4-lane `c` into all eight lanes, then accumulate.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // Compile-time check: lane index must fit in 1 bit (0..=1 for the 2-lane `c`).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of the 2-lane `c` into all four lanes, then accumulate.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Compile-time check: lane index must fit in 3 bits (0..=7 for the 8-lane `c`).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` into all eight lanes, then accumulate.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for the 4-lane `c`).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` into all four lanes, then accumulate.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Direct binding to the 4x16-bit vector LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: callers must have the `rdm` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Direct binding to the 8x16-bit vector LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: callers must have the `rdm` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Direct binding to the 2x32-bit vector LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: callers must have the `rdm` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Direct binding to the 4x32-bit vector LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: callers must have the `rdm` target feature enabled, as
    // required by the `target_feature` attribute on this function.
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for the 4-lane `c`).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c`, then defer to the scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // Compile-time check: lane index must fit in 3 bits (0..=7 for the 8-lane `c`).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane of `c`, then defer to the scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // Compile-time check: lane index must fit in 1 bit (0..=1 for the 2-lane `c`).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane of `c`, then defer to the scalar form.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for the 4-lane `c`).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c`, then defer to the scalar form.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Broadcast all three scalars, run the vector op, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Broadcast all three scalars, run the vector op, take lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for a 4-lane vector).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all four lanes, then multiply-subtract.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Compile-time check: lane index must fit in 1 bit (0..=1 for a 2-lane vector).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `c` across both lanes, then multiply-subtract.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // Compile-time check: lane index must fit in 3 bits (0..=7 for the 8-lane `c`).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of the 8-lane `c` into a 4-lane vector, then multiply-subtract.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for the 4-lane `c`).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of the 4-lane `c` into a 2-lane vector, then multiply-subtract.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // Compile-time check: lane index must fit in 2 bits (0..=3 for the 4-lane `c`).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of the 4-lane `c` into all eight lanes, then multiply-subtract.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // Compile-time check: lane index must fit in 1 bit (0..=1 for the 2-lane `c`).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of the 2-lane `c` into all four lanes, then multiply-subtract.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Compile-time check: lane index must fit in 3 bits (0..=7 for the 8-lane `c`).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` into all eight lanes, then multiply-subtract.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the selected lane of `c` across all 4 lanes, then defer to the
        // plain vector form of the intrinsic.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Thin wrapper: the extern declaration binds the LLVM AArch64 intrinsic by
    // link_name, and the call below forwards the operands unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Thin wrapper: the extern declaration binds the LLVM AArch64 intrinsic by
    // link_name, and the call below forwards the operands unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Thin wrapper: the extern declaration binds the LLVM AArch64 intrinsic by
    // link_name, and the call below forwards the operands unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Thin wrapper: the extern declaration binds the LLVM AArch64 intrinsic by
    // link_name, and the call below forwards the operands unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `c` and reuse the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the chosen lane of `c` and reuse the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane of `c` and reuse the all-scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `c` and reuse the all-scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Broadcast each scalar into a vector, run the vector intrinsic, and take
    // lane 0 of the result as the scalar answer.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Broadcast each scalar into a vector, run the vector intrinsic, and take
    // lane 0 of the result as the scalar answer.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `b` and reuse the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the chosen lane of `b` and reuse the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane of `b` and reuse the all-scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `b` and reuse the all-scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Broadcast both scalars, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Broadcast both scalars, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Broadcast both scalars, run the vector intrinsic, and take lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Broadcast both scalars, run the vector intrinsic, and take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Unsigned value, signed shift amount: broadcast both scalars, run the
    // vector intrinsic, and take lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Unsigned value, signed shift amount: broadcast both scalars, run the
    // vector intrinsic, and take lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // Scalar form maps directly to the i64 variant of the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // Scalar form maps directly to the i32 variant of the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Unsigned value, signed shift amount; maps directly to the i32 variant of
    // the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned value, signed shift amount; maps directly to the i64 variant of
    // the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
        // result (high half) via the shuffle index list.
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed result.
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed result.
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by N, then concatenate `a` (low half) with the narrowed
        // result (high half) via the shuffle index list.
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed result.
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` by N, then concatenate `a` (low half) with the narrowed result.
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 32);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 32);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow signed `b` to unsigned by N, then concatenate `a` (low half)
        // with the narrowed result (high half) via the shuffle index list.
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 16);
    // Narrow signed `b` to unsigned by N, then concatenate with `a` (low half).
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 32);
    // Narrow signed `b` to unsigned by N, then concatenate with `a` (low half).
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 32);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount must be within the element width of the narrowed type.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, run the vector narrowing intrinsic, take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    // Immediate shift for an 8-bit element: N must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Broadcast the scalar, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    // Immediate shift for a 64-bit element: N must fit in 6 bits (0..=63).
    static_assert_uimm_bits!(N, 6);
    // Broadcast the scalar, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    // Immediate shift for a 16-bit element: N must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // Broadcast the scalar, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    // Immediate shift for a 32-bit element: N must fit in 5 bits (0..=31).
    static_assert_uimm_bits!(N, 5);
    // Broadcast the scalar, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    // Immediate shift for an 8-bit element: N must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Broadcast the scalar, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    // Immediate shift for a 64-bit element: N must fit in 6 bits (0..=63).
    static_assert_uimm_bits!(N, 6);
    // Broadcast the scalar, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    // Immediate shift for a 16-bit element: N must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // Broadcast the scalar, run the vector intrinsic, and take lane 0.
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
18758#[doc = "Unsigned saturating shift left"]
18759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
18760#[inline]
18761#[target_feature(enable = "neon")]
18762#[cfg_attr(test, assert_instr(uqshl, N = 2))]
18763#[rustc_legacy_const_generics(1)]
18764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18765pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
18766    static_assert_uimm_bits!(N, 5);
18767    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
18768}
// Scalar saturating-shift-left-by-register intrinsics for 8/16/32-bit elements.
// Both operands are splatted into 64-bit vectors, the vector `vqshl_*` form is
// applied, and lane 0 of the result is extracted. For the unsigned variants the
// shift amount `b` is signed (a negative shift is a right shift), matching the
// vector intrinsic's signature.
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Splat both scalars, shift as vectors, then read back lane 0.
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // Note: the value is unsigned but the shift count `b` is signed.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
// 64-bit scalar saturating shifts go straight to the scalar LLVM intrinsics
// (`llvm.aarch64.neon.{s,u}qshl.i64`) rather than via a splat/extract, since
// the operand already fills a whole 64-bit lane.
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // "unadjusted" ABI: arguments are passed to the LLVM intrinsic unmodified.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
// Scalar SQSHLU (signed saturating shift left, unsigned result) intrinsics:
// signed input, unsigned output. Same splat/shift/extract pattern as the other
// scalar shift-by-immediate wrappers; the immediate width check matches the
// element size (3/6/4/5 bits for 8/64/16/32-bit elements respectively).
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    // N must be a valid shift for an 8-bit element (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    // N must be a valid shift for a 64-bit element (0..=63).
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    // N must be a valid shift for a 16-bit element (0..=15).
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    // N must be a valid shift for a 32-bit element (0..=31).
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
// "_high" saturating shift-right-narrow intrinsics (SQSHRN2/UQSHRN2): narrow the
// wide vector `b` by N, then concatenate `a` (low half) with the narrowed result
// (high half) via a full-width `simd_shuffle!`. N is a right-shift count and must
// be in 1..=element-width-of-the-narrowed-type.
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Right-shift count for a 16->8 narrowing must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Indices 0..=7 pick `a`, 8..=15 pick the narrowed `b` — i.e. concat.
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Right-shift count for a 32->16 narrowing must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Right-shift count for a 64->32 narrowing must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
// Scalar saturating shift-right-narrow intrinsics. The 64->32 forms call scalar
// LLVM intrinsics directly (the const N is forwarded as a runtime argument after
// being validated at compile time); the 16->8 and 32->16 forms reuse the vector
// intrinsic via the splat/extract pattern.
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Right-shift count for a 64->32 narrowing must be 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    // N was checked above; forward it as the intrinsic's shift operand.
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Right-shift count for a 16->8 narrowing must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat into a 128-bit vector, narrow with the vector form, extract lane 0.
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Right-shift count for a 32->16 narrowing must be 1..=16.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
// SQSHRUN family: signed input, unsigned narrowed output. The "_high" forms
// narrow `b` and concatenate with the existing low half `a`; the scalar forms
// use the splat/extract pattern over the vector intrinsic.
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Right-shift count for a 16->8 narrowing must be 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Concatenate: lanes 0..=7 from `a`, 8..=15 from the narrowed `b`.
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    // Splat, narrow with the vector intrinsic, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
// Scalar saturating-subtract intrinsics. The 8/16-bit variants splat both
// operands and reuse the vector `vqsub_*` form; the 32/64-bit variants call the
// scalar LLVM intrinsics directly.
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Splat both scalars, subtract with saturation as vectors, read lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // Scalar LLVM intrinsic; no splat/extract needed at 32/64-bit width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
// TBL (single-table lookup) intrinsics. `vqtbl1`/`vqtbl1q` are private helpers
// binding the LLVM `tbl1` intrinsics on signed-byte vectors; the public u8/p8
// wrappers transmute their table operand to the signed form and transmute the
// result back (same bit pattern, different element type).
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    // Reinterpret u8 table/result as i8 around the signed helper; bit-identical.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
19329#[doc = "Table look-up"]
19330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
19331#[inline]
19332#[target_feature(enable = "neon")]
19333#[cfg_attr(test, assert_instr(tbl))]
19334#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19335fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
19336    unsafe extern "unadjusted" {
19337        #[cfg_attr(
19338            any(target_arch = "aarch64", target_arch = "arm64ec"),
19339            link_name = "llvm.aarch64.neon.tbl2.v8i8"
19340        )]
19341        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
19342    }
19343    unsafe { _vqtbl2(a, b, c) }
19344}
19345#[doc = "Table look-up"]
19346#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
19347#[inline]
19348#[target_feature(enable = "neon")]
19349#[cfg_attr(test, assert_instr(tbl))]
19350#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19351fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
19352    unsafe extern "unadjusted" {
19353        #[cfg_attr(
19354            any(target_arch = "aarch64", target_arch = "arm64ec"),
19355            link_name = "llvm.aarch64.neon.tbl2.v16i8"
19356        )]
19357        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
19358    }
19359    unsafe { _vqtbl2q(a, b, c) }
19360}
19361#[doc = "Table look-up"]
19362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
19363#[inline]
19364#[target_feature(enable = "neon")]
19365#[cfg_attr(test, assert_instr(tbl))]
19366#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19367pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
19368    vqtbl2(a.0, a.1, b)
19369}
19370#[doc = "Table look-up"]
19371#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
19372#[inline]
19373#[target_feature(enable = "neon")]
19374#[cfg_attr(test, assert_instr(tbl))]
19375#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19376pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
19377    vqtbl2q(a.0, a.1, b)
19378}
19379#[doc = "Table look-up"]
19380#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
19381#[inline]
19382#[cfg(target_endian = "little")]
19383#[target_feature(enable = "neon")]
19384#[cfg_attr(test, assert_instr(tbl))]
19385#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19386pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
19387    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
19388}
19389#[doc = "Table look-up"]
19390#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
19391#[inline]
19392#[cfg(target_endian = "big")]
19393#[target_feature(enable = "neon")]
19394#[cfg_attr(test, assert_instr(tbl))]
19395#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19396pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
19397    let mut a: uint8x16x2_t = a;
19398    a.0 = unsafe {
19399        simd_shuffle!(
19400            a.0,
19401            a.0,
19402            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19403        )
19404    };
19405    a.1 = unsafe {
19406        simd_shuffle!(
19407            a.1,
19408            a.1,
19409            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
19410        )
19411    };
19412    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
19413    unsafe {
19414        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
19415        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
19416    }
19417}
19418#[doc = "Table look-up"]
19419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
19420#[inline]
19421#[cfg(target_endian = "little")]
19422#[target_feature(enable = "neon")]
19423#[cfg_attr(test, assert_instr(tbl))]
19424#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19425pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
19426    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
19427}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian: reverse the lane order of both table vectors and of the
    // index vector to the little-endian layout the LLVM intrinsic expects;
    // the result is reversed back below.
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBL2 helper.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse the lane order of both table vectors and of the
    // index vector to the little-endian layout the LLVM intrinsic expects;
    // the result is reversed back below.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBL2 helper.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian: reverse the lane order of both table vectors and of the
    // index vector to the little-endian layout the LLVM intrinsic expects;
    // the result is reversed back below.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding of the LLVM TBL3 intrinsic: three 128-bit table
    // registers (a, b, c) indexed by the 64-bit vector d.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private binding of the LLVM TBL3 intrinsic, full-width variant:
    // three 128-bit table registers indexed by a 128-bit vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    // Signed lanes match the helper's signature directly; no conversion
    // or endian fix-up is needed.
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    // Signed lanes match the helper's signature directly; no conversion
    // or endian fix-up is needed.
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (unsigned <-> signed lanes) around the signed TBL3 helper.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse the lane order of all three table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (unsigned <-> signed lanes) around the signed TBL3 helper.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian: reverse the lane order of all three table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBL3 helper.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse the lane order of all three table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBL3 helper.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian: reverse the lane order of all three table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private binding of the LLVM TBL4 intrinsic: four 128-bit table
    // registers (a..d) indexed by the 64-bit vector e.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private binding of the LLVM TBL4 intrinsic, full-width variant:
    // four 128-bit table registers indexed by a 128-bit vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    // Signed lanes match the helper's signature directly; no conversion
    // or endian fix-up is needed.
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    // Signed lanes match the helper's signature directly; no conversion
    // or endian fix-up is needed.
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (unsigned <-> signed lanes) around the signed TBL4 helper.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse the lane order of all four table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (unsigned <-> signed lanes) around the signed TBL4 helper.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian: reverse the lane order of all four table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBL4 helper.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse the lane order of all four table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBL4 helper.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian: reverse the lane order of all four table vectors and of
    // the index vector to the little-endian layout the LLVM intrinsic
    // expects; the result is reversed back below.
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors;
    // the final shuffle restores big-endian lane order for the caller.
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private binding of the LLVM TBX1 intrinsic: table look-up into b via
    // indices c, with fallback vector a (TBX semantics per the Arm ISA).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private binding of the LLVM TBX1 intrinsic, full-width variant:
    // look-up into b via indices c, with fallback vector a.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Signed lanes match the helper's signature directly; no conversion
    // is needed.
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Signed lanes match the helper's signature directly; no conversion
    // is needed.
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (unsigned <-> signed lanes) around the signed TBX1 helper.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (unsigned <-> signed lanes) around the signed TBX1 helper.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBX1 helper.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: `transmute` only reinterprets same-size NEON byte vectors
    // (poly <-> signed lanes) around the signed TBX1 helper.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding of the LLVM TBX2 intrinsic: look-up into the two
    // table registers b and c via indices d, with fallback vector a.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private binding of the LLVM TBX2 intrinsic, full-width variant:
    // look-up into the two table registers b and c via indices d, with
    // fallback vector a.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: by-value vector arguments and result only; the intrinsic
    // dereferences no pointers.
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table struct and forward to the raw LLVM binding.
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the two-register table struct and forward to the raw LLVM binding.
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Little-endian: unsigned vectors share the byte layout of the signed
    // variant, so transmute straight into the int8-typed `vqtbx2` helper.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse the lanes of every input vector so the data is in
    // the lane order the (little-endian-layout) intrinsic expects, perform
    // the lookup, then reverse the result back.
    let mut b: uint8x16x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // Little-endian: reinterpret the unsigned vectors and reuse the
    // int8-typed `vqtbx2q` helper directly.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian: reverse lanes of all inputs, do the lookup, then reverse
    // the result back to the caller's lane order.
    let mut b: uint8x16x2_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: uint8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // Little-endian: poly vectors reinterpret cleanly as int8, so delegate
    // to the int8-typed `vqtbx2` helper via transmutes.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse lanes of all inputs before the lookup and reverse
    // the result back afterwards.
    let mut b: poly8x16x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // Little-endian: reinterpret the poly vectors and reuse the int8-typed
    // `vqtbx2q` helper directly.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian: reverse lanes of all inputs, do the lookup, then reverse
    // the result back to the caller's lane order.
    let mut b: poly8x16x2_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: poly8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private helper binding the LLVM `tbx3` intrinsic: three 128-bit table
    // vectors (`b`..`d`), 64-bit index vector `e`, 64-bit fallback/result `a`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    // SAFETY: target feature `neon` is enabled on this function.
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private helper binding the 128-bit-wide LLVM `tbx3` intrinsic
    // (`v16i8`): same as `vqtbx3` but with 128-bit index/result vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: target feature `neon` is enabled on this function.
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the three-register table struct and forward to the raw LLVM binding.
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the three-register table struct and forward to the raw LLVM binding.
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // Little-endian: reinterpret the unsigned vectors and reuse the
    // int8-typed `vqtbx3` helper directly.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse lanes of all inputs before the lookup and reverse
    // the result back afterwards.
    let mut b: uint8x16x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: uint8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // Little-endian: reinterpret the unsigned vectors and reuse the
    // int8-typed `vqtbx3q` helper directly.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian: reverse lanes of all inputs, do the lookup, then reverse
    // the result back to the caller's lane order.
    let mut b: uint8x16x3_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: uint8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // Little-endian: reinterpret the poly vectors and reuse the int8-typed
    // `vqtbx3` helper directly.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse lanes of all inputs before the lookup and reverse
    // the result back afterwards.
    let mut b: poly8x16x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: poly8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // Little-endian: reinterpret the poly vectors and reuse the int8-typed
    // `vqtbx3q` helper directly.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian: reverse lanes of all inputs, do the lookup, then reverse
    // the result back to the caller's lane order.
    let mut b: poly8x16x3_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: poly8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    // Private helper binding the LLVM `tbx4` intrinsic: four 128-bit table
    // vectors (`b`..`e`), 64-bit index vector `f`, 64-bit fallback/result `a`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: target feature `neon` is enabled on this function.
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    // Private helper binding the 128-bit-wide LLVM `tbx4` intrinsic
    // (`v16i8`): same as `vqtbx4` but with 128-bit index/result vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: target feature `neon` is enabled on this function.
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the four-register table struct and forward to the raw LLVM binding.
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the four-register table struct and forward to the raw LLVM binding.
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // Little-endian: reinterpret the unsigned vectors and reuse the
    // int8-typed `vqtbx4` helper directly.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse lanes of all inputs before the lookup and reverse
    // the result back afterwards.
    let mut b: uint8x16x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: uint8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // Little-endian: reinterpret the unsigned vectors and reuse the
    // int8-typed `vqtbx4q` helper directly.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian: reverse lanes of all inputs, do the lookup, then reverse
    // the result back to the caller's lane order.
    let mut b: uint8x16x4_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: uint8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // Little-endian: reinterpret the poly vectors and reuse the int8-typed
    // `vqtbx4` helper directly.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian: reverse lanes of all inputs before the lookup and reverse
    // the result back afterwards.
    let mut b: poly8x16x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: poly8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // Little-endian: reinterpret the poly vectors and reuse the int8-typed
    // `vqtbx4q` helper directly.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian: reverse lanes of all inputs, do the lookup, then reverse
    // the result back to the caller's lane order.
    let mut b: poly8x16x4_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // Undo the lane reversal on the returned vector.
        let ret_val: poly8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Direct binding to the LLVM intrinsic backing the SHA3 `rax1` instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the LLVM intrinsic has exactly this signature, and the required
    // `neon,sha3` features are guaranteed by the `target_feature` attribute.
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // SAFETY: `simd_bitreverse` is valid on integer SIMD vectors; it reverses
    // the bits of each lane, matching the `rbit` instruction this lowers to.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // SAFETY: same as `vrbit_s8`, for the 128-bit (16-lane) vector form.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Little-endian: delegate to the signed variant; u8 and i8 lanes have
    // identical bit layouts.
    // SAFETY: uint8x8_t and int8x8_t are layout-compatible 64-bit vectors.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Big-endian: lane-reverse around the call so the transmute-based
    // delegation sees lanes in the order the instruction expects.
    // SAFETY: constant shuffle indices 0..=7 are in range for an 8-lane vector.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: uint8x8_t and int8x8_t are layout-compatible 64-bit vectors.
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // SAFETY: uint8x16_t and int8x16_t are layout-compatible 128-bit vectors.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Big-endian 128-bit form: reverse all 16 lanes before and after the call.
    // SAFETY: constant shuffle indices 0..=15 are in range for a 16-lane vector.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: uint8x16_t and int8x16_t are layout-compatible 128-bit vectors.
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // SAFETY: poly8x8_t and int8x8_t are layout-compatible 64-bit vectors.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Big-endian poly variant: same lane-reversal shim as `vrbit_u8`.
    // SAFETY: constant shuffle indices 0..=7 are in range for an 8-lane vector.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: poly8x8_t and int8x8_t are layout-compatible 64-bit vectors.
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // SAFETY: poly8x16_t and int8x16_t are layout-compatible 128-bit vectors.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Big-endian 128-bit poly variant: reverse 16 lanes before and after.
    // SAFETY: constant shuffle indices 0..=15 are in range for a 16-lane vector.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: poly8x16_t and int8x16_t are layout-compatible 128-bit vectors.
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    // Binding to the `frecpe` LLVM intrinsic, 1 x f64 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the intrinsic has exactly this signature and `neon` is enabled
    // via the `target_feature` attribute.
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    // Binding to the `frecpe` LLVM intrinsic, 2 x f64 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: signature matches the intrinsic; `neon` is enabled above.
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    // Binding to the `frecpe` LLVM intrinsic, scalar f64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    // SAFETY: signature matches the intrinsic; `neon` is enabled above.
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    // Binding to the `frecpe` LLVM intrinsic, scalar f32 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    // SAFETY: signature matches the intrinsic; `neon` is enabled above.
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpeh_f16(a: f16) -> f16 {
    // Binding to the `frecpe` LLVM intrinsic, scalar f16 form (requires fp16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    // SAFETY: signature matches the intrinsic; `neon,fp16` are enabled above.
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Binding to the `frecps` LLVM intrinsic, 1 x f64 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the intrinsic has exactly this signature and `neon` is enabled
    // via the `target_feature` attribute.
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Binding to the `frecps` LLVM intrinsic, 2 x f64 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: signature matches the intrinsic; `neon` is enabled above.
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    // Binding to the `frecps` LLVM intrinsic, scalar f64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: signature matches the intrinsic; `neon` is enabled above.
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    // Binding to the `frecps` LLVM intrinsic, scalar f32 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: signature matches the intrinsic; `neon` is enabled above.
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    // Binding to the `frecps` LLVM intrinsic, scalar f16 form (requires fp16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: signature matches the intrinsic; `neon,fp16` are enabled above.
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    // Binding to the `frecpx` LLVM intrinsic, scalar f64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    // SAFETY: the intrinsic has exactly this signature and `neon` is enabled
    // via the `target_feature` attribute.
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    // Binding to the `frecpx` LLVM intrinsic, scalar f32 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    // SAFETY: signature matches the intrinsic; `neon` is enabled above.
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpxh_f16(a: f16) -> f16 {
    // Binding to the `frecpx` LLVM intrinsic, scalar f16 form (requires fp16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    // SAFETY: signature matches the intrinsic; `neon,fp16` are enabled above.
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; the cast only
    // reinterprets the bits (no conversion, lowers to `nop`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // SAFETY: p128 and float64x2_t are both 128 bits of plain data; the cast
    // only reinterprets the bits (no conversion, lowers to `nop`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; the cast only
    // reinterprets the bits (no conversion, lowers to `nop`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    // SAFETY: both types are 64-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // SAFETY: float64x2_t and p128 are both 128 bits of plain data; the cast
    // only reinterprets the bits (no conversion, lowers to `nop`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // SAFETY: both types are 128-bit plain-data NEON vectors; bits only.
    unsafe { transmute(a) }
}
21787#[doc = "Vector reinterpret cast operation"]
21788#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
21789#[inline]
21790#[target_feature(enable = "neon")]
21791#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21792#[cfg_attr(test, assert_instr(nop))]
21793pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
21794    unsafe { transmute(a) }
21795}
21796#[doc = "Vector reinterpret cast operation"]
21797#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
21798#[inline]
21799#[target_feature(enable = "neon")]
21800#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21801#[cfg_attr(test, assert_instr(nop))]
21802pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
21803    unsafe { transmute(a) }
21804}
21805#[doc = "Vector reinterpret cast operation"]
21806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
21807#[inline]
21808#[target_feature(enable = "neon")]
21809#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21810#[cfg_attr(test, assert_instr(nop))]
21811pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
21812    unsafe { transmute(a) }
21813}
21814#[doc = "Vector reinterpret cast operation"]
21815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
21816#[inline]
21817#[target_feature(enable = "neon")]
21818#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21819#[cfg_attr(test, assert_instr(nop))]
21820pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
21821    unsafe { transmute(a) }
21822}
21823#[doc = "Vector reinterpret cast operation"]
21824#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
21825#[inline]
21826#[target_feature(enable = "neon")]
21827#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21828#[cfg_attr(test, assert_instr(nop))]
21829pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
21830    unsafe { transmute(a) }
21831}
21832#[doc = "Vector reinterpret cast operation"]
21833#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
21834#[inline]
21835#[target_feature(enable = "neon")]
21836#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21837#[cfg_attr(test, assert_instr(nop))]
21838pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
21839    unsafe { transmute(a) }
21840}
21841#[doc = "Vector reinterpret cast operation"]
21842#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
21843#[inline]
21844#[target_feature(enable = "neon")]
21845#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21846#[cfg_attr(test, assert_instr(nop))]
21847pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
21848    unsafe { transmute(a) }
21849}
21850#[doc = "Vector reinterpret cast operation"]
21851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
21852#[inline]
21853#[target_feature(enable = "neon")]
21854#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21855#[cfg_attr(test, assert_instr(nop))]
21856pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
21857    unsafe { transmute(a) }
21858}
21859#[doc = "Vector reinterpret cast operation"]
21860#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
21861#[inline]
21862#[target_feature(enable = "neon")]
21863#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21864#[cfg_attr(test, assert_instr(nop))]
21865pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
21866    unsafe { transmute(a) }
21867}
21868#[doc = "Vector reinterpret cast operation"]
21869#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
21870#[inline]
21871#[target_feature(enable = "neon")]
21872#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21873#[cfg_attr(test, assert_instr(nop))]
21874pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
21875    unsafe { transmute(a) }
21876}
21877#[doc = "Vector reinterpret cast operation"]
21878#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
21879#[inline]
21880#[target_feature(enable = "neon")]
21881#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21882#[cfg_attr(test, assert_instr(nop))]
21883pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
21884    unsafe { transmute(a) }
21885}
21886#[doc = "Vector reinterpret cast operation"]
21887#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
21888#[inline]
21889#[target_feature(enable = "neon")]
21890#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21891#[cfg_attr(test, assert_instr(nop))]
21892pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
21893    unsafe { transmute(a) }
21894}
21895#[doc = "Vector reinterpret cast operation"]
21896#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
21897#[inline]
21898#[target_feature(enable = "neon")]
21899#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21900#[cfg_attr(test, assert_instr(nop))]
21901pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
21902    unsafe { transmute(a) }
21903}
21904#[doc = "Vector reinterpret cast operation"]
21905#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
21906#[inline]
21907#[target_feature(enable = "neon")]
21908#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21909#[cfg_attr(test, assert_instr(nop))]
21910pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
21911    unsafe { transmute(a) }
21912}
21913#[doc = "Vector reinterpret cast operation"]
21914#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
21915#[inline]
21916#[target_feature(enable = "neon")]
21917#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21918#[cfg_attr(test, assert_instr(nop))]
21919pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
21920    unsafe { transmute(a) }
21921}
21922#[doc = "Vector reinterpret cast operation"]
21923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
21924#[inline]
21925#[target_feature(enable = "neon")]
21926#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21927#[cfg_attr(test, assert_instr(nop))]
21928pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
21929    unsafe { transmute(a) }
21930}
21931#[doc = "Vector reinterpret cast operation"]
21932#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
21933#[inline]
21934#[target_feature(enable = "neon")]
21935#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21936#[cfg_attr(test, assert_instr(nop))]
21937pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
21938    unsafe { transmute(a) }
21939}
21940#[doc = "Vector reinterpret cast operation"]
21941#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
21942#[inline]
21943#[target_feature(enable = "neon")]
21944#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21945#[cfg_attr(test, assert_instr(nop))]
21946pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
21947    unsafe { transmute(a) }
21948}
21949#[doc = "Vector reinterpret cast operation"]
21950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
21951#[inline]
21952#[target_feature(enable = "neon")]
21953#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21954#[cfg_attr(test, assert_instr(nop))]
21955pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
21956    unsafe { transmute(a) }
21957}
21958#[doc = "Vector reinterpret cast operation"]
21959#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
21960#[inline]
21961#[target_feature(enable = "neon")]
21962#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21963#[cfg_attr(test, assert_instr(nop))]
21964pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
21965    unsafe { transmute(a) }
21966}
21967#[doc = "Vector reinterpret cast operation"]
21968#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
21969#[inline]
21970#[target_feature(enable = "neon")]
21971#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21972#[cfg_attr(test, assert_instr(nop))]
21973pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
21974    unsafe { transmute(a) }
21975}
21976#[doc = "Vector reinterpret cast operation"]
21977#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
21978#[inline]
21979#[target_feature(enable = "neon")]
21980#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21981#[cfg_attr(test, assert_instr(nop))]
21982pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
21983    unsafe { transmute(a) }
21984}
21985#[doc = "Vector reinterpret cast operation"]
21986#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
21987#[inline]
21988#[target_feature(enable = "neon")]
21989#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21990#[cfg_attr(test, assert_instr(nop))]
21991pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
21992    unsafe { transmute(a) }
21993}
21994#[doc = "Vector reinterpret cast operation"]
21995#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
21996#[inline]
21997#[target_feature(enable = "neon")]
21998#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21999#[cfg_attr(test, assert_instr(nop))]
22000pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
22001    unsafe { transmute(a) }
22002}
22003#[doc = "Vector reinterpret cast operation"]
22004#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
22005#[inline]
22006#[target_feature(enable = "neon")]
22007#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22008#[cfg_attr(test, assert_instr(nop))]
22009pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
22010    unsafe { transmute(a) }
22011}
22012#[doc = "Vector reinterpret cast operation"]
22013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
22014#[inline]
22015#[target_feature(enable = "neon")]
22016#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22017#[cfg_attr(test, assert_instr(nop))]
22018pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
22019    unsafe { transmute(a) }
22020}
22021#[doc = "Vector reinterpret cast operation"]
22022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
22023#[inline]
22024#[target_feature(enable = "neon")]
22025#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22026#[cfg_attr(test, assert_instr(nop))]
22027pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
22028    unsafe { transmute(a) }
22029}
22030#[doc = "Vector reinterpret cast operation"]
22031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
22032#[inline]
22033#[target_feature(enable = "neon")]
22034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22035#[cfg_attr(test, assert_instr(nop))]
22036pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
22037    unsafe { transmute(a) }
22038}
22039#[doc = "Vector reinterpret cast operation"]
22040#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
22041#[inline]
22042#[target_feature(enable = "neon")]
22043#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22044#[cfg_attr(test, assert_instr(nop))]
22045pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
22046    unsafe { transmute(a) }
22047}
22048#[doc = "Vector reinterpret cast operation"]
22049#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
22050#[inline]
22051#[target_feature(enable = "neon")]
22052#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22053#[cfg_attr(test, assert_instr(nop))]
22054pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
22055    unsafe { transmute(a) }
22056}
22057#[doc = "Vector reinterpret cast operation"]
22058#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
22059#[inline]
22060#[target_feature(enable = "neon")]
22061#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22062#[cfg_attr(test, assert_instr(nop))]
22063pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
22064    unsafe { transmute(a) }
22065}
22066#[doc = "Vector reinterpret cast operation"]
22067#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
22068#[inline]
22069#[target_feature(enable = "neon")]
22070#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22071#[cfg_attr(test, assert_instr(nop))]
22072pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
22073    unsafe { transmute(a) }
22074}
// --- FRINT32*/FRINT64* rounding intrinsics (require the `frintts` target feature) ---
// These round each floating-point lane to a value representable as a 32- or 64-bit
// signed integer, per the suffix: `x` = use the current FP rounding mode, `z` = round
// toward zero. Each wrapper forwards to the corresponding LLVM aarch64 intrinsic via an
// `extern "unadjusted"` declaration. The `_f64` (single-lane) variants call a scalar
// `f64 -> f64` LLVM intrinsic: lane 0 is extracted, rounded, and the scalar result is
// transmuted back into a `float64x1_t`.
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        // Scalar LLVM intrinsic (note: no `.neon.` segment in the link name).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, and rebuild the 1-lane vector.
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        // Scalar LLVM intrinsic (note: no `.neon.` segment in the link name).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, and rebuild the 1-lane vector.
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        // Scalar LLVM intrinsic (note: no `.neon.` segment in the link name).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, and rebuild the 1-lane vector.
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        // Scalar LLVM intrinsic (note: no `.neon.` segment in the link name).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    // Extract the single lane, round it as a scalar, and rebuild the 1-lane vector.
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
22331#[doc = "Floating-point round to integral, toward zero"]
22332#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
22333#[inline]
22334#[target_feature(enable = "neon,fp16")]
22335#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22336#[cfg(not(target_arch = "arm64ec"))]
22337#[cfg_attr(test, assert_instr(frintz))]
22338pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
22339    unsafe { simd_trunc(a) }
22340}
22341#[doc = "Floating-point round to integral, toward zero"]
22342#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
22343#[inline]
22344#[target_feature(enable = "neon,fp16")]
22345#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22346#[cfg(not(target_arch = "arm64ec"))]
22347#[cfg_attr(test, assert_instr(frintz))]
22348pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
22349    unsafe { simd_trunc(a) }
22350}
22351#[doc = "Floating-point round to integral, toward zero"]
22352#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
22353#[inline]
22354#[target_feature(enable = "neon")]
22355#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22356#[cfg_attr(test, assert_instr(frintz))]
22357pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
22358    unsafe { simd_trunc(a) }
22359}
22360#[doc = "Floating-point round to integral, toward zero"]
22361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
22362#[inline]
22363#[target_feature(enable = "neon")]
22364#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22365#[cfg_attr(test, assert_instr(frintz))]
22366pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
22367    unsafe { simd_trunc(a) }
22368}
22369#[doc = "Floating-point round to integral, toward zero"]
22370#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
22371#[inline]
22372#[target_feature(enable = "neon")]
22373#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22374#[cfg_attr(test, assert_instr(frintz))]
22375pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
22376    unsafe { simd_trunc(a) }
22377}
22378#[doc = "Floating-point round to integral, toward zero"]
22379#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
22380#[inline]
22381#[target_feature(enable = "neon")]
22382#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22383#[cfg_attr(test, assert_instr(frintz))]
22384pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
22385    unsafe { simd_trunc(a) }
22386}
22387#[doc = "Floating-point round to integral, to nearest with ties to away"]
22388#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
22389#[inline]
22390#[target_feature(enable = "neon,fp16")]
22391#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22392#[cfg(not(target_arch = "arm64ec"))]
22393#[cfg_attr(test, assert_instr(frinta))]
22394pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
22395    unsafe { simd_round(a) }
22396}
22397#[doc = "Floating-point round to integral, to nearest with ties to away"]
22398#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
22399#[inline]
22400#[target_feature(enable = "neon,fp16")]
22401#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22402#[cfg(not(target_arch = "arm64ec"))]
22403#[cfg_attr(test, assert_instr(frinta))]
22404pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
22405    unsafe { simd_round(a) }
22406}
22407#[doc = "Floating-point round to integral, to nearest with ties to away"]
22408#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
22409#[inline]
22410#[target_feature(enable = "neon")]
22411#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22412#[cfg_attr(test, assert_instr(frinta))]
22413pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
22414    unsafe { simd_round(a) }
22415}
22416#[doc = "Floating-point round to integral, to nearest with ties to away"]
22417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
22418#[inline]
22419#[target_feature(enable = "neon")]
22420#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22421#[cfg_attr(test, assert_instr(frinta))]
22422pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
22423    unsafe { simd_round(a) }
22424}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane f64 variant; codegen pinned to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 variant; codegen pinned to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    // Scalar f16 variant; delegates to the `roundf16` helper (FRINTA).
    roundf16(a)
}
22453#[doc = "Floating-point round to integral, to nearest with ties to away"]
22454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
22455#[inline]
22456#[target_feature(enable = "neon,fp16")]
22457#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22458#[cfg(not(target_arch = "arm64ec"))]
22459#[cfg_attr(test, assert_instr(frintz))]
22460pub fn vrndh_f16(a: f16) -> f16 {
22461    truncf16(a)
22462}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    // FFI binding of the LLVM intrinsic implementing this operation
    // (see `link_name`); only resolved on aarch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane) variant; FFI binding of `llvm.nearbyint.v8f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    // FFI binding of `llvm.nearbyint.v2f32`; codegen pinned to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) variant; FFI binding of `llvm.nearbyint.v4f32`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane f64 variant; FFI binding of `llvm.nearbyint.v1f64`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 variant; FFI binding of `llvm.nearbyint.v2f64`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    // Scalar f16 variant; FFI binding of `llvm.nearbyint.f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise floor (round toward minus infinity); pinned to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane) floor variant; codegen pinned to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise floor (round toward minus infinity); pinned to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) floor variant; codegen pinned to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane f64 floor variant; codegen pinned to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 floor variant; codegen pinned to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    // Scalar f16 floor; delegates to the `floorf16` helper (FRINTM).
    floorf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    // FFI binding of `llvm.roundeven.v1f64`; codegen pinned to FRINTN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 variant; FFI binding of `llvm.roundeven.v2f64`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndnq_f64(a) }
}
22676#[doc = "Floating-point round to integral, toward minus infinity"]
22677#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
22678#[inline]
22679#[target_feature(enable = "neon,fp16")]
22680#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22681#[cfg(not(target_arch = "arm64ec"))]
22682#[cfg_attr(test, assert_instr(frintn))]
22683pub fn vrndnh_f16(a: f16) -> f16 {
22684    unsafe extern "unadjusted" {
22685        #[cfg_attr(
22686            any(target_arch = "aarch64", target_arch = "arm64ec"),
22687            link_name = "llvm.roundeven.f16"
22688        )]
22689        fn _vrndnh_f16(a: f16) -> f16;
22690    }
22691    unsafe { _vrndnh_f16(a) }
22692}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    // Scalar f32 variant; FFI binding of `llvm.roundeven.f32` (FRINTN).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise ceil (round toward plus infinity); pinned to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane) ceil variant; codegen pinned to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise ceil (round toward plus infinity); pinned to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) ceil variant; codegen pinned to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane f64 ceil variant; codegen pinned to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 ceil variant; codegen pinned to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar f16 ceil; delegates to the `ceilf16` helper (FRINTP).
    ceilf16(a)
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    // NOTE(review): despite the helper's name, `assert_instr` pins this to
    // FRINTX, which uses the current rounding mode (see doc summary above).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane) variant; codegen pinned to FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // Codegen pinned to FRINTX (uses the current rounding mode, per the doc).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) variant; codegen pinned to FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane f64 variant; codegen pinned to FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 variant; codegen pinned to FRINTX.
    unsafe { simd_round_ties_even(a) }
}
22831#[doc = "Floating-point round to integral, using current rounding mode"]
22832#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
22833#[inline]
22834#[target_feature(enable = "neon,fp16")]
22835#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22836#[cfg(not(target_arch = "arm64ec"))]
22837#[cfg_attr(test, assert_instr(frintx))]
22838pub fn vrndxh_f16(a: f16) -> f16 {
22839    round_ties_even_f16(a)
22840}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    // FFI binding of the scalar SRSHL intrinsic; `b` is a signed shift
    // amount (negative values shift right, per the SRSHL instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    // Forward both arguments unchanged to the intrinsic declared above.
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    // FFI binding of the scalar URSHL intrinsic; the value is unsigned but
    // the shift amount `b` stays signed (negative shifts right).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    // Forward both arguments unchanged to the intrinsic declared above.
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // N must be a valid right-shift amount for a 64-bit value.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    // N must be a valid right-shift amount for a 64-bit value.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // The shift amount must fit the narrowed (8-bit) element width.
    static_assert!(N >= 1 && N <= 8);
    // Rounding-shift-narrow `b` to 8 lanes, then concatenate after `a`:
    // shuffle indices 0..=7 select `a`, 8..=15 select the narrowed `b`.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // The shift amount must fit the narrowed (16-bit) element width.
    static_assert!(N >= 1 && N <= 16);
    // Narrowed `b` lanes (shuffle indices 4..=7) are appended after `a` (0..=3).
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // The shift amount must fit the narrowed (32-bit) element width.
    static_assert!(N >= 1 && N <= 32);
    // Narrowed `b` lanes (shuffle indices 2..=3) are appended after `a` (0..=1).
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // The shift amount must fit the narrowed (8-bit) element width.
    static_assert!(N >= 1 && N <= 8);
    // Rounding-shift-narrow `b` to 8 lanes, then concatenate after `a`:
    // shuffle indices 0..=7 select `a`, 8..=15 select the narrowed `b`.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // The shift amount must fit the narrowed (16-bit) element width.
    static_assert!(N >= 1 && N <= 16);
    // Narrowed `b` lanes (shuffle indices 4..=7) are appended after `a` (0..=3).
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // The shift amount must fit the narrowed (32-bit) element width.
    static_assert!(N >= 1 && N <= 32);
    // Narrowed `b` lanes (shuffle indices 2..=3) are appended after `a` (0..=1).
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    // FFI binding of the FRSQRTE intrinsic (single f64 lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    // FFI binding of the FRSQRTE intrinsic (two f64 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    // Scalar f64 variant; FFI binding of `llvm.aarch64.neon.frsqrte.f64`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    // Scalar f32 variant; FFI binding of `llvm.aarch64.neon.frsqrte.f32`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    // Scalar f16 variant; FFI binding of `llvm.aarch64.neon.frsqrte.f16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    // Forward the argument unchanged to the intrinsic declared above.
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // FFI binding of the FRSQRTS (Newton-step refinement) intrinsic,
    // single f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // Forward both arguments unchanged to the intrinsic declared above.
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI binding of the FRSQRTS intrinsic, two f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // Forward both arguments unchanged to the intrinsic declared above.
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    // Scalar f64 variant; FFI binding of `llvm.aarch64.neon.frsqrts.f64`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    // Forward both arguments unchanged to the intrinsic declared above.
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    // Scalar f32 variant; FFI binding of `llvm.aarch64.neon.frsqrts.f32`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    // Forward both arguments unchanged to the intrinsic declared above.
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    // Half-precision scalar form; requires the fp16 feature and is not
    // compiled for arm64ec (see the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
23135#[doc = "Signed rounding shift right and accumulate."]
23136#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
23137#[inline]
23138#[target_feature(enable = "neon")]
23139#[cfg_attr(test, assert_instr(srshr, N = 2))]
23140#[rustc_legacy_const_generics(2)]
23141#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23142pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
23143    static_assert!(N >= 1 && N <= 64);
23144    let b: i64 = vrshrd_n_s64::<N>(b);
23145    a.wrapping_add(b)
23146}
23147#[doc = "Unsigned rounding shift right and accumulate."]
23148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
23149#[inline]
23150#[target_feature(enable = "neon")]
23151#[cfg_attr(test, assert_instr(urshr, N = 2))]
23152#[rustc_legacy_const_generics(2)]
23153#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23154pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
23155    static_assert!(N >= 1 && N <= 64);
23156    let b: u64 = vrshrd_n_u64::<N>(b);
23157    a.wrapping_add(b)
23158}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Narrow b - c via vrsubhn_s16, then concatenate: `a` fills lanes 0-7,
    // the narrowed result fills lanes 8-15.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Narrow b - c, then concatenate: `a` is the low half, `x` the high half.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Narrow b - c, then concatenate: `a` is the low half, `x` the high half.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Narrow b - c, then concatenate: `a` fills lanes 0-7, `x` lanes 8-15.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Narrow b - c, then concatenate: `a` is the low half, `x` the high half.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Narrow b - c, then concatenate: `a` is the low half, `x` the high half.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Big-endian variant: same lane mapping as the little-endian version;
    // only the expected instruction in the test attribute differs.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Big-endian variant: identical body to the little-endian version.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Big-endian variant: identical body to the little-endian version.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Big-endian variant: identical body to the little-endian version.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Big-endian variant: identical body to the little-endian version.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Big-endian variant: identical body to the little-endian version.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // Single-lane vector, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // Two-lane vector: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Forwards to the LLVM crypto intrinsic; requires the sha3 feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Forwards to the LLVM crypto intrinsic; requires the sha3 feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Forwards to the LLVM crypto intrinsic; requires the sha3 feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Forwards to the LLVM crypto intrinsic; requires the sha3 feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
23377#[doc = "Signed Shift left"]
23378#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
23379#[inline]
23380#[target_feature(enable = "neon")]
23381#[cfg_attr(test, assert_instr(sshl))]
23382#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23383pub fn vshld_s64(a: i64, b: i64) -> i64 {
23384    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
23385}
23386#[doc = "Unsigned Shift left"]
23387#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
23388#[inline]
23389#[target_feature(enable = "neon")]
23390#[cfg_attr(test, assert_instr(ushl))]
23391#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23392pub fn vshld_u64(a: u64, b: i64) -> u64 {
23393    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
23394}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half (lanes 8-15), then widen-and-shift it.
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Extract the high half (lanes 4-7), then widen-and-shift it.
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Extract the high half (lanes 2-3), then widen-and-shift it.
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half (lanes 8-15), then widen-and-shift it.
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Extract the high half (lanes 4-7), then widen-and-shift it.
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Extract the high half (lanes 2-3), then widen-and-shift it.
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Shift-narrow `b`, then concatenate: `a` fills lanes 0-7, the
        // narrowed result fills lanes 8-15.
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // Shift-narrow `b`, then concatenate with `a` as the low half.
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // Shift-narrow `b`, then concatenate with `a` as the low half.
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Shift-narrow `b`, then concatenate: `a` fills lanes 0-7, the
        // narrowed result fills lanes 8-15.
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // Shift-narrow `b`, then concatenate with `a` as the low half.
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // Shift-narrow `b`, then concatenate with `a` as the low half.
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Shift amount 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Shift amount 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Shift amount 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Shift amount 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // The validated constant N is forwarded as the immediate operand.
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(N, 3);
    // Delegates to the signed variant through bit-preserving transmutes.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert_uimm_bits!(N, 3);
    // Delegates to the signed variant through bit-preserving transmutes.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert_uimm_bits!(N, 4);
    // Delegates to the signed variant through bit-preserving transmutes.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert_uimm_bits!(N, 4);
    // Delegates to the signed variant through bit-preserving transmutes.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 0 && N <= 31);
    // Delegates to the signed variant through bit-preserving transmutes.
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 31);
    // Delegates to the signed variant through bit-preserving transmutes.
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
23767#[doc = "Shift Left and Insert (immediate)"]
23768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
23769#[inline]
23770#[target_feature(enable = "neon")]
23771#[cfg_attr(test, assert_instr(sli, N = 1))]
23772#[rustc_legacy_const_generics(2)]
23773#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23774pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
23775    static_assert!(N >= 0 && N <= 63);
23776    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
23777}
23778#[doc = "Shift Left and Insert (immediate)"]
23779#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
23780#[inline]
23781#[target_feature(enable = "neon")]
23782#[cfg_attr(test, assert_instr(sli, N = 1))]
23783#[rustc_legacy_const_generics(2)]
23784#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23785pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
23786    static_assert!(N >= 0 && N <= 63);
23787    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
23788}
23789#[doc = "Shift Left and Insert (immediate)"]
23790#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
23791#[inline]
23792#[target_feature(enable = "neon")]
23793#[cfg_attr(test, assert_instr(sli, N = 1))]
23794#[rustc_legacy_const_generics(2)]
23795#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23796pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
23797    static_assert_uimm_bits!(N, 3);
23798    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
23799}
23800#[doc = "Shift Left and Insert (immediate)"]
23801#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
23802#[inline]
23803#[target_feature(enable = "neon")]
23804#[cfg_attr(test, assert_instr(sli, N = 1))]
23805#[rustc_legacy_const_generics(2)]
23806#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23807pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
23808    static_assert_uimm_bits!(N, 3);
23809    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
23810}
23811#[doc = "Shift Left and Insert (immediate)"]
23812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
23813#[inline]
23814#[target_feature(enable = "neon")]
23815#[cfg_attr(test, assert_instr(sli, N = 1))]
23816#[rustc_legacy_const_generics(2)]
23817#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23818pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
23819    static_assert_uimm_bits!(N, 4);
23820    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
23821}
23822#[doc = "Shift Left and Insert (immediate)"]
23823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
23824#[inline]
23825#[target_feature(enable = "neon")]
23826#[cfg_attr(test, assert_instr(sli, N = 1))]
23827#[rustc_legacy_const_generics(2)]
23828#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23829pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
23830    static_assert_uimm_bits!(N, 4);
23831    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
23832}
23833#[doc = "Shift Left and Insert (immediate)"]
23834#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
23835#[inline]
23836#[target_feature(enable = "neon,aes")]
23837#[cfg_attr(test, assert_instr(sli, N = 1))]
23838#[rustc_legacy_const_generics(2)]
23839#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23840pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
23841    static_assert!(N >= 0 && N <= 63);
23842    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
23843}
23844#[doc = "Shift Left and Insert (immediate)"]
23845#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
23846#[inline]
23847#[target_feature(enable = "neon,aes")]
23848#[cfg_attr(test, assert_instr(sli, N = 1))]
23849#[rustc_legacy_const_generics(2)]
23850#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23851pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
23852    static_assert!(N >= 0 && N <= 63);
23853    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
23854}
23855#[doc = "Shift left and insert"]
23856#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
23857#[inline]
23858#[target_feature(enable = "neon")]
23859#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23860#[rustc_legacy_const_generics(2)]
23861#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
23862pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
23863    static_assert!(N >= 0 && N <= 63);
23864    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
23865}
23866#[doc = "Shift left and insert"]
23867#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
23868#[inline]
23869#[target_feature(enable = "neon")]
23870#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23871#[rustc_legacy_const_generics(2)]
23872#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
23873pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
23874    static_assert!(N >= 0 && N <= 63);
23875    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
23876}
23877#[doc = "SM3PARTW1"]
23878#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
23879#[inline]
23880#[target_feature(enable = "neon,sm4")]
23881#[cfg_attr(test, assert_instr(sm3partw1))]
23882#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
23883pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
23884    unsafe extern "unadjusted" {
23885        #[cfg_attr(
23886            any(target_arch = "aarch64", target_arch = "arm64ec"),
23887            link_name = "llvm.aarch64.crypto.sm3partw1"
23888        )]
23889        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
23890    }
23891    unsafe { _vsm3partw1q_u32(a, b, c) }
23892}
23893#[doc = "SM3PARTW2"]
23894#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
23895#[inline]
23896#[target_feature(enable = "neon,sm4")]
23897#[cfg_attr(test, assert_instr(sm3partw2))]
23898#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
23899pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
23900    unsafe extern "unadjusted" {
23901        #[cfg_attr(
23902            any(target_arch = "aarch64", target_arch = "arm64ec"),
23903            link_name = "llvm.aarch64.crypto.sm3partw2"
23904        )]
23905        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
23906    }
23907    unsafe { _vsm3partw2q_u32(a, b, c) }
23908}
23909#[doc = "SM3SS1"]
23910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
23911#[inline]
23912#[target_feature(enable = "neon,sm4")]
23913#[cfg_attr(test, assert_instr(sm3ss1))]
23914#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
23915pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
23916    unsafe extern "unadjusted" {
23917        #[cfg_attr(
23918            any(target_arch = "aarch64", target_arch = "arm64ec"),
23919            link_name = "llvm.aarch64.crypto.sm3ss1"
23920        )]
23921        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
23922    }
23923    unsafe { _vsm3ss1q_u32(a, b, c) }
23924}
23925#[doc = "SM3TT1A"]
23926#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
23927#[inline]
23928#[target_feature(enable = "neon,sm4")]
23929#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
23930#[rustc_legacy_const_generics(3)]
23931#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
23932pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
23933    static_assert_uimm_bits!(IMM2, 2);
23934    unsafe extern "unadjusted" {
23935        #[cfg_attr(
23936            any(target_arch = "aarch64", target_arch = "arm64ec"),
23937            link_name = "llvm.aarch64.crypto.sm3tt1a"
23938        )]
23939        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
23940    }
23941    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
23942}
23943#[doc = "SM3TT1B"]
23944#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
23945#[inline]
23946#[target_feature(enable = "neon,sm4")]
23947#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
23948#[rustc_legacy_const_generics(3)]
23949#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
23950pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
23951    static_assert_uimm_bits!(IMM2, 2);
23952    unsafe extern "unadjusted" {
23953        #[cfg_attr(
23954            any(target_arch = "aarch64", target_arch = "arm64ec"),
23955            link_name = "llvm.aarch64.crypto.sm3tt1b"
23956        )]
23957        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
23958    }
23959    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
23960}
23961#[doc = "SM3TT2A"]
23962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
23963#[inline]
23964#[target_feature(enable = "neon,sm4")]
23965#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
23966#[rustc_legacy_const_generics(3)]
23967#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
23968pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
23969    static_assert_uimm_bits!(IMM2, 2);
23970    unsafe extern "unadjusted" {
23971        #[cfg_attr(
23972            any(target_arch = "aarch64", target_arch = "arm64ec"),
23973            link_name = "llvm.aarch64.crypto.sm3tt2a"
23974        )]
23975        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
23976    }
23977    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
23978}
23979#[doc = "SM3TT2B"]
23980#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
23981#[inline]
23982#[target_feature(enable = "neon,sm4")]
23983#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
23984#[rustc_legacy_const_generics(3)]
23985#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
23986pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
23987    static_assert_uimm_bits!(IMM2, 2);
23988    unsafe extern "unadjusted" {
23989        #[cfg_attr(
23990            any(target_arch = "aarch64", target_arch = "arm64ec"),
23991            link_name = "llvm.aarch64.crypto.sm3tt2b"
23992        )]
23993        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
23994    }
23995    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
23996}
23997#[doc = "SM4 key"]
23998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
23999#[inline]
24000#[target_feature(enable = "neon,sm4")]
24001#[cfg_attr(test, assert_instr(sm4ekey))]
24002#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24003pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
24004    unsafe extern "unadjusted" {
24005        #[cfg_attr(
24006            any(target_arch = "aarch64", target_arch = "arm64ec"),
24007            link_name = "llvm.aarch64.crypto.sm4ekey"
24008        )]
24009        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
24010    }
24011    unsafe { _vsm4ekeyq_u32(a, b) }
24012}
24013#[doc = "SM4 encode"]
24014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
24015#[inline]
24016#[target_feature(enable = "neon,sm4")]
24017#[cfg_attr(test, assert_instr(sm4e))]
24018#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24019pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
24020    unsafe extern "unadjusted" {
24021        #[cfg_attr(
24022            any(target_arch = "aarch64", target_arch = "arm64ec"),
24023            link_name = "llvm.aarch64.crypto.sm4e"
24024        )]
24025        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
24026    }
24027    unsafe { _vsm4eq_u32(a, b) }
24028}
24029#[doc = "Unsigned saturating Accumulate of Signed value."]
24030#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
24031#[inline]
24032#[target_feature(enable = "neon")]
24033#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24034#[cfg_attr(test, assert_instr(usqadd))]
24035pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
24036    unsafe extern "unadjusted" {
24037        #[cfg_attr(
24038            any(target_arch = "aarch64", target_arch = "arm64ec"),
24039            link_name = "llvm.aarch64.neon.usqadd.v8i8"
24040        )]
24041        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
24042    }
24043    unsafe { _vsqadd_u8(a, b) }
24044}
24045#[doc = "Unsigned saturating Accumulate of Signed value."]
24046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
24047#[inline]
24048#[target_feature(enable = "neon")]
24049#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24050#[cfg_attr(test, assert_instr(usqadd))]
24051pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
24052    unsafe extern "unadjusted" {
24053        #[cfg_attr(
24054            any(target_arch = "aarch64", target_arch = "arm64ec"),
24055            link_name = "llvm.aarch64.neon.usqadd.v16i8"
24056        )]
24057        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
24058    }
24059    unsafe { _vsqaddq_u8(a, b) }
24060}
24061#[doc = "Unsigned saturating Accumulate of Signed value."]
24062#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
24063#[inline]
24064#[target_feature(enable = "neon")]
24065#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24066#[cfg_attr(test, assert_instr(usqadd))]
24067pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
24068    unsafe extern "unadjusted" {
24069        #[cfg_attr(
24070            any(target_arch = "aarch64", target_arch = "arm64ec"),
24071            link_name = "llvm.aarch64.neon.usqadd.v4i16"
24072        )]
24073        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
24074    }
24075    unsafe { _vsqadd_u16(a, b) }
24076}
24077#[doc = "Unsigned saturating Accumulate of Signed value."]
24078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
24079#[inline]
24080#[target_feature(enable = "neon")]
24081#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24082#[cfg_attr(test, assert_instr(usqadd))]
24083pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
24084    unsafe extern "unadjusted" {
24085        #[cfg_attr(
24086            any(target_arch = "aarch64", target_arch = "arm64ec"),
24087            link_name = "llvm.aarch64.neon.usqadd.v8i16"
24088        )]
24089        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
24090    }
24091    unsafe { _vsqaddq_u16(a, b) }
24092}
24093#[doc = "Unsigned saturating Accumulate of Signed value."]
24094#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
24095#[inline]
24096#[target_feature(enable = "neon")]
24097#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24098#[cfg_attr(test, assert_instr(usqadd))]
24099pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
24100    unsafe extern "unadjusted" {
24101        #[cfg_attr(
24102            any(target_arch = "aarch64", target_arch = "arm64ec"),
24103            link_name = "llvm.aarch64.neon.usqadd.v2i32"
24104        )]
24105        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
24106    }
24107    unsafe { _vsqadd_u32(a, b) }
24108}
24109#[doc = "Unsigned saturating Accumulate of Signed value."]
24110#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
24111#[inline]
24112#[target_feature(enable = "neon")]
24113#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24114#[cfg_attr(test, assert_instr(usqadd))]
24115pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
24116    unsafe extern "unadjusted" {
24117        #[cfg_attr(
24118            any(target_arch = "aarch64", target_arch = "arm64ec"),
24119            link_name = "llvm.aarch64.neon.usqadd.v4i32"
24120        )]
24121        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
24122    }
24123    unsafe { _vsqaddq_u32(a, b) }
24124}
24125#[doc = "Unsigned saturating Accumulate of Signed value."]
24126#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
24127#[inline]
24128#[target_feature(enable = "neon")]
24129#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24130#[cfg_attr(test, assert_instr(usqadd))]
24131pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
24132    unsafe extern "unadjusted" {
24133        #[cfg_attr(
24134            any(target_arch = "aarch64", target_arch = "arm64ec"),
24135            link_name = "llvm.aarch64.neon.usqadd.v1i64"
24136        )]
24137        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
24138    }
24139    unsafe { _vsqadd_u64(a, b) }
24140}
24141#[doc = "Unsigned saturating Accumulate of Signed value."]
24142#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
24143#[inline]
24144#[target_feature(enable = "neon")]
24145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24146#[cfg_attr(test, assert_instr(usqadd))]
24147pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
24148    unsafe extern "unadjusted" {
24149        #[cfg_attr(
24150            any(target_arch = "aarch64", target_arch = "arm64ec"),
24151            link_name = "llvm.aarch64.neon.usqadd.v2i64"
24152        )]
24153        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
24154    }
24155    unsafe { _vsqaddq_u64(a, b) }
24156}
24157#[doc = "Unsigned saturating accumulate of signed value"]
24158#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
24159#[inline]
24160#[target_feature(enable = "neon")]
24161#[cfg_attr(test, assert_instr(usqadd))]
24162#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24163pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
24164    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
24165}
24166#[doc = "Unsigned saturating accumulate of signed value"]
24167#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
24168#[inline]
24169#[target_feature(enable = "neon")]
24170#[cfg_attr(test, assert_instr(usqadd))]
24171#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24172pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
24173    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
24174}
24175#[doc = "Unsigned saturating accumulate of signed value"]
24176#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
24177#[inline]
24178#[target_feature(enable = "neon")]
24179#[cfg_attr(test, assert_instr(usqadd))]
24180#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24181pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
24182    unsafe extern "unadjusted" {
24183        #[cfg_attr(
24184            any(target_arch = "aarch64", target_arch = "arm64ec"),
24185            link_name = "llvm.aarch64.neon.usqadd.i64"
24186        )]
24187        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
24188    }
24189    unsafe { _vsqaddd_u64(a, b) }
24190}
24191#[doc = "Unsigned saturating accumulate of signed value"]
24192#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
24193#[inline]
24194#[target_feature(enable = "neon")]
24195#[cfg_attr(test, assert_instr(usqadd))]
24196#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24197pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
24198    unsafe extern "unadjusted" {
24199        #[cfg_attr(
24200            any(target_arch = "aarch64", target_arch = "arm64ec"),
24201            link_name = "llvm.aarch64.neon.usqadd.i32"
24202        )]
24203        fn _vsqadds_u32(a: u32, b: i32) -> u32;
24204    }
24205    unsafe { _vsqadds_u32(a, b) }
24206}
24207#[doc = "Calculates the square root of each lane."]
24208#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
24209#[inline]
24210#[cfg_attr(test, assert_instr(fsqrt))]
24211#[target_feature(enable = "neon,fp16")]
24212#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24213#[cfg(not(target_arch = "arm64ec"))]
24214pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
24215    unsafe { simd_fsqrt(a) }
24216}
24217#[doc = "Calculates the square root of each lane."]
24218#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
24219#[inline]
24220#[cfg_attr(test, assert_instr(fsqrt))]
24221#[target_feature(enable = "neon,fp16")]
24222#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24223#[cfg(not(target_arch = "arm64ec"))]
24224pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
24225    unsafe { simd_fsqrt(a) }
24226}
24227#[doc = "Calculates the square root of each lane."]
24228#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
24229#[inline]
24230#[target_feature(enable = "neon")]
24231#[cfg_attr(test, assert_instr(fsqrt))]
24232#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24233pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
24234    unsafe { simd_fsqrt(a) }
24235}
24236#[doc = "Calculates the square root of each lane."]
24237#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
24238#[inline]
24239#[target_feature(enable = "neon")]
24240#[cfg_attr(test, assert_instr(fsqrt))]
24241#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24242pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
24243    unsafe { simd_fsqrt(a) }
24244}
24245#[doc = "Calculates the square root of each lane."]
24246#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
24247#[inline]
24248#[target_feature(enable = "neon")]
24249#[cfg_attr(test, assert_instr(fsqrt))]
24250#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24251pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
24252    unsafe { simd_fsqrt(a) }
24253}
24254#[doc = "Calculates the square root of each lane."]
24255#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
24256#[inline]
24257#[target_feature(enable = "neon")]
24258#[cfg_attr(test, assert_instr(fsqrt))]
24259#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24260pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
24261    unsafe { simd_fsqrt(a) }
24262}
24263#[doc = "Floating-point round to integral, using current rounding mode"]
24264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
24265#[inline]
24266#[target_feature(enable = "neon,fp16")]
24267#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24268#[cfg(not(target_arch = "arm64ec"))]
24269#[cfg_attr(test, assert_instr(fsqrt))]
24270pub fn vsqrth_f16(a: f16) -> f16 {
24271    sqrtf16(a)
24272}
24273#[doc = "Shift Right and Insert (immediate)"]
24274#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
24275#[inline]
24276#[target_feature(enable = "neon")]
24277#[cfg_attr(test, assert_instr(sri, N = 1))]
24278#[rustc_legacy_const_generics(2)]
24279#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24280pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
24281    static_assert!(N >= 1 && N <= 8);
24282    unsafe extern "unadjusted" {
24283        #[cfg_attr(
24284            any(target_arch = "aarch64", target_arch = "arm64ec"),
24285            link_name = "llvm.aarch64.neon.vsri.v8i8"
24286        )]
24287        fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
24288    }
24289    unsafe { _vsri_n_s8(a, b, N) }
24290}
24291#[doc = "Shift Right and Insert (immediate)"]
24292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
24293#[inline]
24294#[target_feature(enable = "neon")]
24295#[cfg_attr(test, assert_instr(sri, N = 1))]
24296#[rustc_legacy_const_generics(2)]
24297#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24298pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
24299    static_assert!(N >= 1 && N <= 8);
24300    unsafe extern "unadjusted" {
24301        #[cfg_attr(
24302            any(target_arch = "aarch64", target_arch = "arm64ec"),
24303            link_name = "llvm.aarch64.neon.vsri.v16i8"
24304        )]
24305        fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
24306    }
24307    unsafe { _vsriq_n_s8(a, b, N) }
24308}
24309#[doc = "Shift Right and Insert (immediate)"]
24310#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
24311#[inline]
24312#[target_feature(enable = "neon")]
24313#[cfg_attr(test, assert_instr(sri, N = 1))]
24314#[rustc_legacy_const_generics(2)]
24315#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24316pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
24317    static_assert!(N >= 1 && N <= 16);
24318    unsafe extern "unadjusted" {
24319        #[cfg_attr(
24320            any(target_arch = "aarch64", target_arch = "arm64ec"),
24321            link_name = "llvm.aarch64.neon.vsri.v4i16"
24322        )]
24323        fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
24324    }
24325    unsafe { _vsri_n_s16(a, b, N) }
24326}
24327#[doc = "Shift Right and Insert (immediate)"]
24328#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
24329#[inline]
24330#[target_feature(enable = "neon")]
24331#[cfg_attr(test, assert_instr(sri, N = 1))]
24332#[rustc_legacy_const_generics(2)]
24333#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24334pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
24335    static_assert!(N >= 1 && N <= 16);
24336    unsafe extern "unadjusted" {
24337        #[cfg_attr(
24338            any(target_arch = "aarch64", target_arch = "arm64ec"),
24339            link_name = "llvm.aarch64.neon.vsri.v8i16"
24340        )]
24341        fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
24342    }
24343    unsafe { _vsriq_n_s16(a, b, N) }
24344}
24345#[doc = "Shift Right and Insert (immediate)"]
24346#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
24347#[inline]
24348#[target_feature(enable = "neon")]
24349#[cfg_attr(test, assert_instr(sri, N = 1))]
24350#[rustc_legacy_const_generics(2)]
24351#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24352pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
24353    static_assert!(N >= 1 && N <= 32);
24354    unsafe extern "unadjusted" {
24355        #[cfg_attr(
24356            any(target_arch = "aarch64", target_arch = "arm64ec"),
24357            link_name = "llvm.aarch64.neon.vsri.v2i32"
24358        )]
24359        fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
24360    }
24361    unsafe { _vsri_n_s32(a, b, N) }
24362}
24363#[doc = "Shift Right and Insert (immediate)"]
24364#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
24365#[inline]
24366#[target_feature(enable = "neon")]
24367#[cfg_attr(test, assert_instr(sri, N = 1))]
24368#[rustc_legacy_const_generics(2)]
24369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24370pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
24371    static_assert!(N >= 1 && N <= 32);
24372    unsafe extern "unadjusted" {
24373        #[cfg_attr(
24374            any(target_arch = "aarch64", target_arch = "arm64ec"),
24375            link_name = "llvm.aarch64.neon.vsri.v4i32"
24376        )]
24377        fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
24378    }
24379    unsafe { _vsriq_n_s32(a, b, N) }
24380}
24381#[doc = "Shift Right and Insert (immediate)"]
24382#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
24383#[inline]
24384#[target_feature(enable = "neon")]
24385#[cfg_attr(test, assert_instr(sri, N = 1))]
24386#[rustc_legacy_const_generics(2)]
24387#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24388pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
24389    static_assert!(N >= 1 && N <= 64);
24390    unsafe extern "unadjusted" {
24391        #[cfg_attr(
24392            any(target_arch = "aarch64", target_arch = "arm64ec"),
24393            link_name = "llvm.aarch64.neon.vsri.v1i64"
24394        )]
24395        fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
24396    }
24397    unsafe { _vsri_n_s64(a, b, N) }
24398}
24399#[doc = "Shift Right and Insert (immediate)"]
24400#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
24401#[inline]
24402#[target_feature(enable = "neon")]
24403#[cfg_attr(test, assert_instr(sri, N = 1))]
24404#[rustc_legacy_const_generics(2)]
24405#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24406pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
24407    static_assert!(N >= 1 && N <= 64);
24408    unsafe extern "unadjusted" {
24409        #[cfg_attr(
24410            any(target_arch = "aarch64", target_arch = "arm64ec"),
24411            link_name = "llvm.aarch64.neon.vsri.v2i64"
24412        )]
24413        fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
24414    }
24415    unsafe { _vsriq_n_s64(a, b, N) }
24416}
// The unsigned and polynomial SRI wrappers below all delegate to the signed
// implementations: SRI inserts shifted bit patterns, so only the lane width
// matters and a bitwise reinterpret (`transmute`) between same-sized vector
// types is sufficient. The shift range 1..=lane-width is checked here as well
// so the error points at the function the user actually called.
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size vector reinterpretation (see note above).
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: i64 and int64x1_t are both 64 bits wide. The scalars are viewed as
    // one-lane vectors, the vector SRI is applied, and the result is viewed back.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: u64 and uint64x1_t are both 64 bits wide; same scheme as vsrid_n_s64.
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
// Each vst1* function below lowers to a plain unaligned store of the whole
// vector: `ptr` is not required to be aligned (hence `write_unaligned` and the
// allowed `cast_ptr_alignment`), but the caller must ensure it is valid for a
// write of the vector's size — that is the unsafety these functions carry.
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    // The tuple's two registers are passed individually; `a` is the destination.
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    // The tuple's two registers are passed individually; `a` is the destination.
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    // The tuple's three registers are passed individually; `a` is the destination.
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    // The tuple's three registers are passed individually; `a` is the destination.
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    // The tuple's four registers are passed individually; `a` is the destination.
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    // The tuple's four registers are passed individually; `a` is the destination.
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Extract the selected lane and store it through `a` as a plain scalar
    // write (no dedicated instruction needed; the test expects `nop`).
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and store it through `a` as a plain scalar
    // write (no dedicated instruction needed; the test expects `nop`).
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments. The instruction test expects
    // `st1` for this single-lane (v1f64) variant (see `assert_instr`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Poly and signed 64-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Unsigned and signed 64-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst2q_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // LANE indexes one of the 16 lanes (4 bits).
    static_assert_uimm_bits!(LANE, 4);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Poly and signed 64-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    // LANE indexes one of the 16 lanes (4 bits).
    static_assert_uimm_bits!(LANE, 4);
    // Unsigned and signed 8-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Unsigned and signed 64-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    // LANE indexes one of the 16 lanes (4 bits).
    static_assert_uimm_bits!(LANE, 4);
    // Poly and signed 8-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // Poly and signed 64-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // Unsigned and signed 64-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments. The instruction test expects
    // `nop` for this single-lane (v1f64) variant (see `assert_instr`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v1f64.p0"
        )]
        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst3_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Poly and signed 64-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Unsigned and signed 64-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2f64.p0"
        )]
        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst3q_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2i64.p0"
        )]
        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst3q_s64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // LANE indexes one of the 16 lanes (4 bits).
    static_assert_uimm_bits!(LANE, 4);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Poly and signed 64-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    // LANE indexes one of the 16 lanes (4 bits).
    static_assert_uimm_bits!(LANE, 4);
    // Unsigned and signed 8-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Unsigned and signed 64-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    // LANE indexes one of the 16 lanes (4 bits).
    static_assert_uimm_bits!(LANE, 4);
    // Poly and signed 8-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // Poly and signed 64-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // Unsigned and signed 64-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments. The instruction test expects
    // `nop` for this single-lane (v1f64) variant (see `assert_instr`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Poly and signed 64-bit vectors share a bit layout, so this reinterprets
    // the arguments and delegates to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Unsigned and signed 64-bit vectors share a bit layout, so this
    // reinterprets the arguments and delegates to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
        )]
        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
        )]
        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `a as _` cast.
    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // LANE indexes one of the 2 lanes (1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM intrinsic; "unadjusted" forwards vector
    // arguments without Rust ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // Lane index is widened to i64; the byte pointer is untyped (`a as _`).
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
25746#[doc = "Store multiple 4-element structures from four registers"]
25747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
25748#[doc = "## Safety"]
25749#[doc = "  * Neon instrinsic unsafe"]
25750#[inline]
25751#[target_feature(enable = "neon")]
25752#[cfg_attr(test, assert_instr(st4, LANE = 0))]
25753#[rustc_legacy_const_generics(2)]
25754#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25755pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
25756    static_assert_uimm_bits!(LANE, 4);
25757    unsafe extern "unadjusted" {
25758        #[cfg_attr(
25759            any(target_arch = "aarch64", target_arch = "arm64ec"),
25760            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
25761        )]
25762        fn _vst4q_lane_s8(
25763            a: int8x16_t,
25764            b: int8x16_t,
25765            c: int8x16_t,
25766            d: int8x16_t,
25767            n: i64,
25768            ptr: *mut i8,
25769        );
25770    }
25771    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
25772}
25773#[doc = "Store multiple 4-element structures from four registers"]
25774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
25775#[doc = "## Safety"]
25776#[doc = "  * Neon instrinsic unsafe"]
25777#[inline]
25778#[target_feature(enable = "neon")]
25779#[cfg_attr(test, assert_instr(st4, LANE = 0))]
25780#[rustc_legacy_const_generics(2)]
25781#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25782pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
25783    static_assert_uimm_bits!(LANE, 1);
25784    unsafe extern "unadjusted" {
25785        #[cfg_attr(
25786            any(target_arch = "aarch64", target_arch = "arm64ec"),
25787            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
25788        )]
25789        fn _vst4q_lane_s64(
25790            a: int64x2_t,
25791            b: int64x2_t,
25792            c: int64x2_t,
25793            d: int64x2_t,
25794            n: i64,
25795            ptr: *mut i8,
25796        );
25797    }
25798    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
25799}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    // p64 and poly64x2x4_t share size and layout with i64 / int64x2x4_t,
    // so the store is forwarded to the signed variant via transmute.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    // Same-layout reinterpret: the unsigned store is the signed store.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    // Same-layout reinterpret: forward to the signed 64-bit variant.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    // Same-layout reinterpret: forward to the signed 8-bit variant.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // Same-layout reinterpret: forward to the signed 64-bit variant.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // Same-layout reinterpret: forward to the signed 64-bit variant.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise a - b, lowered directly to FSUB.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise a - b over both 64-bit float lanes.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    // wrapping_sub matches the modular (two's-complement) semantics of the
    // hardware subtract; a plain `-` would panic on overflow in debug builds.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Modular subtract, as the hardware instruction behaves.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision subtract.
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high half (lanes 8..16) of each input, sign-extend to
        // the doubled element width, then subtract — i.e. SSUBL2.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // High half (lanes 4..8), sign-extended, then subtracted.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // High half (lanes 2..4), sign-extended, then subtracted.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // High half (lanes 8..16), zero-extended, then subtracted — USUBL2.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // High half (lanes 4..8), zero-extended, then subtracted.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // High half (lanes 2..4), zero-extended, then subtracted.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Take the high half (lanes 8..16) of the narrow operand, sign-extend
        // it to the wide element type, and subtract from `a` — SSUBW2.
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // High half (lanes 4..8) of `b`, sign-extended, subtracted from `a`.
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // High half (lanes 2..4) of `b`, sign-extended, subtracted from `a`.
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // High half (lanes 8..16) of `b`, zero-extended, subtracted — USUBW2.
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // High half (lanes 4..8) of `b`, zero-extended, subtracted from `a`.
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // High half (lanes 2..4) of `b`, zero-extended, subtracted from `a`.
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
    // LANE indexes one of four 32-bit groups (each group = four u8 lanes).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // View `c` as four 32-bit groups, broadcast the selected group to
        // both result lanes, then delegate to the USDOT form with operands
        // swapped (sudot(a, b_signed, c_unsigned) == usdot(a, c, b)).
        let c: uint32x4_t = transmute(c);
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, transmute(c), b)
    }
}
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected 32-bit group of `c` to all four result
        // lanes, then delegate to USDOT with swapped multiplicand roles.
        let c: uint32x4_t = transmute(c);
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, transmute(c), b)
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // AArch64 has no 64-bit TBL table, so widen the 8-byte table to 16 bytes
    // by padding with zeros; indices >= 8 then hit the zero half, matching
    // the Armv7 vtbl1 out-of-range result of 0.
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        {
            transmute(b)
        }
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Zero-pad the table to 128 bits and use the q-register TBL.
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    // Zero-pad the table to 128 bits and use the q-register TBL.
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // The two 8-byte table halves form one 16-byte table for q-register TBL.
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Combine the two table halves into one 128-bit table and look up.
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse lane order of all operands to get a little-endian
    // view before the lookup, then reverse the result back.
    let mut a: uint8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Combine the two table halves into one 128-bit table and look up.
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: lane-reverse operands, look up, lane-reverse the result.
    let mut a: poly8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // Three 8-byte table registers become two 16-byte registers; the unused
    // fourth half is zero so out-of-range indices (24..32) read 0, matching
    // the Armv7 vtbl3 behaviour (indices >= 24 yield 0).
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the three table halves (plus a zero half) into a 2-register
    // 256-bit table for the q-register TBL2 lookup.
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: lane-reverse every operand first, look up in the packed
    // 2-register table, then lane-reverse the result back.
    let mut a: uint8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Pack three table halves plus a zero half for the TBL2 lookup.
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: lane-reverse operands, look up, lane-reverse the result.
    let mut a: poly8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // Four 8-byte table registers pack exactly into two 16-byte registers
    // for the q-register TBL2 lookup; no zero padding is needed.
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the four table halves into a 2-register 256-bit table.
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian: lane-reverse every operand first, look up in the packed
    // table, then lane-reverse the result back.
    let mut a: uint8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Pack the four table halves into a 2-register 256-bit table.
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian: lane-reverse operands, look up, lane-reverse the result.
    let mut a: poly8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        // Armv7 vtbx1 leaves the destination lane unchanged for indices >= 8,
        // but the zero-padded 16-byte AArch64 table covers indices up to 15.
        // simd_select re-applies the fallback `a` for every lane whose index
        // is not < 8, restoring the 64-bit-table semantics.
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        // Lanes with index >= 8 fall back to `a`, matching 64-bit vtbx1.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        // Lanes with index >= 8 fall back to `a`, matching 64-bit vtbx1.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // The two 8-byte halves of `b` form a full 16-entry table, so the TBX
    // instruction itself handles out-of-range indices (>= 16) by keeping the
    // lanes of `a` — no extra select is needed here (unlike vtbx1/vtbx3).
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Little-endian: the two halves of `b` form a complete 16-entry table, so
    // TBX handles out-of-range indices (>= 16) itself by keeping lanes of `a`.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x2_t = b;
    // Big-endian build: reverse the lane order of every input so the lookup
    // runs on little-endian lane numbering, then reverse the result back.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // 16-entry table: TBX keeps lanes of `a` for indices >= 16 by itself.
        let ret_val: uint8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Little-endian: the two halves of `b` form a complete 16-entry table, so
    // TBX handles out-of-range indices (>= 16) itself by keeping lanes of `a`.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x2_t = b;
    // Big-endian build: reverse the lane order of every input so the lookup
    // runs on little-endian lane numbering, then reverse the result back.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // 16-entry table: TBX keeps lanes of `a` for indices >= 16 by itself.
        let ret_val: poly8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // Pad the 24-entry table out to two 16-byte vectors; the last 8 entries
    // (indices 24..=31) are zero filler.
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            // In-range mask (c < 24): indices 24..=31 would read the zero
            // padding instead of keeping `a`, so select `a` for those lanes.
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Pad the 24-entry table out to two 16-byte vectors; entries 24..=31 are
    // zero filler.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            // In-range mask (c < 24): indices 24..=31 would read the zero
            // padding, so the select restores `a` for those lanes.
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x3_t = b;
    // Big-endian build: reverse the lane order of every input so the lookup
    // runs on little-endian lane numbering, then reverse the result back.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Pad the 24-entry table out to two 16-byte vectors; entries 24..=31 are
    // zero filler.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(simd_select(
            // In-range mask (c < 24): indices 24..=31 would read the zero
            // padding, so the select restores `a` for those lanes.
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Pad the 24-entry table out to two 16-byte vectors; entries 24..=31 are
    // zero filler.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            // In-range mask (c < 24): indices 24..=31 would read the zero
            // padding, so the select restores `a` for those lanes.
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x3_t = b;
    // Big-endian build: reverse the lane order of every input so the lookup
    // runs on little-endian lane numbering, then reverse the result back.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Pad the 24-entry table out to two 16-byte vectors; entries 24..=31 are
    // zero filler.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(simd_select(
            // In-range mask (c < 24): indices 24..=31 would read the zero
            // padding, so the select restores `a` for those lanes.
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // The four 8-byte halves of `b` exactly fill two 16-byte table registers
    // (32 entries), so the two-register TBX handles out-of-range indices
    // (>= 32) itself by keeping the lanes of `a` — no select needed.
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // The four halves of `b` exactly fill two 16-byte table registers
    // (32 entries); TBX keeps lanes of `a` for indices >= 32 by itself.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x4_t = b;
    // Big-endian build: reverse the lane order of every input so the lookup
    // runs on little-endian lane numbering, then reverse the result back.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // 32-entry table: TBX keeps lanes of `a` for indices >= 32 by itself.
        let ret_val: uint8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // The four halves of `b` exactly fill two 16-byte table registers
    // (32 entries); TBX keeps lanes of `a` for indices >= 32 by itself.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x4_t = b;
    // Big-endian build: reverse the lane order of every input so the lookup
    // runs on little-endian lane numbering, then reverse the result back.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // 32-entry table: TBX keeps lanes of `a` for indices >= 32 by itself.
        let ret_val: poly8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // TRN1: interleave the even-numbered lanes -> [a0, b0, a2, b2]
    // (shuffle indices 0..=3 select from `a`, 4..=7 from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Result = [a0, b0]; with only two lanes TRN1 coincides with ZIP1,
    // which is why the codegen assertion above expects `zip1`.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Result = [a0, b0]; with only two lanes TRN1 coincides with ZIP1,
    // which is why the codegen assertion above expects `zip1`.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Result = [a0, b0]; with only two lanes TRN1 coincides with ZIP1,
    // which is why the codegen assertion above expects `zip1`.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Result = [a0, b0]; with only two lanes TRN1 coincides with ZIP1,
    // which is why the codegen assertion above expects `zip1`.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Result = [a0, b0]; with only two lanes TRN1 coincides with ZIP1,
    // which is why the codegen assertion above expects `zip1`.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Result = [a0, b0]; with only two lanes TRN1 coincides with ZIP1,
    // which is why the codegen assertion above expects `zip1`.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Result = [a0, b0]; with only two lanes TRN1 coincides with ZIP1,
    // which is why the codegen assertion above expects `zip1`.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN1: interleave the 8 even-numbered lanes of `a` and `b`
    // (indices 16..=31 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // TRN1: interleave the 8 even-numbered lanes of `a` and `b`
    // (indices 16..=31 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // TRN1: interleave the 8 even-numbered lanes of `a` and `b`
    // (indices 16..=31 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN1: interleave even lanes -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // TRN2: interleave the odd-numbered lanes -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // TRN2: interleave odd lanes -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Result = [a1, b1]; with only two lanes TRN2 coincides with ZIP2,
    // which is why the codegen assertion above expects `zip2`.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Result = [a1, b1]; with only two lanes TRN2 coincides with ZIP2,
    // which is why the codegen assertion above expects `zip2`.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Result = [a1, b1]; with only two lanes TRN2 coincides with ZIP2,
    // which is why the codegen assertion above expects `zip2`.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Result = [a1, b1]; with only two lanes TRN2 coincides with ZIP2,
    // which is why the codegen assertion above expects `zip2`.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Result = [a1, b1]; with only two lanes TRN2 coincides with ZIP2,
    // which is why the codegen assertion above expects `zip2`.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Result = [a1, b1]; with only two lanes TRN2 coincides with ZIP2,
    // which is why the codegen assertion above expects `zip2`.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Result = [a1, b1]; with only two lanes TRN2 coincides with ZIP2,
    // which is why the codegen assertion above expects `zip2`.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN2: interleave odd lanes -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN2: interleave odd lanes -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN2: interleave the 8 odd-numbered lanes of `a` and `b`
    // (indices 16..=31 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // CMTST: the result lane is all-ones when `a & b` is nonzero in that
    // lane, all-zeros otherwise (AND followed by a compare-not-equal to 0).
    unsafe {
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // CMTST: each result lane is all-ones when `a & b` is nonzero in that
    // lane, all-zeros otherwise (AND followed by a compare-not-equal to 0).
    unsafe {
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // CMTST on polynomial lanes: purely bitwise, so the poly64 variant is
    // the same AND-then-compare-not-equal-zero sequence as the integer ones.
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // CMTST on polynomial lanes: purely bitwise, so the poly64 variant is
    // the same AND-then-compare-not-equal-zero sequence as the integer ones.
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // CMTST: the result lane is all-ones when `a & b` is nonzero in that
    // lane, all-zeros otherwise (AND followed by a compare-not-equal to 0).
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // CMTST: each result lane is all-ones when `a & b` is nonzero in that
    // lane, all-zeros otherwise (AND followed by a compare-not-equal to 0).
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: reinterpret both scalars as 1-lane vectors, reuse
    // `vtst_s64`, and transmute the single-lane result back to `u64`.
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    // Scalar form: reinterpret both scalars as 1-lane vectors, reuse
    // `vtst_u64`, and transmute the single-lane result back to `u64`.
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    // Direct binding to the LLVM SUQADD intrinsic for 8 x i8 lanes;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Direct binding to the LLVM SUQADD intrinsic for 16 x i8 lanes;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    // Direct binding to the LLVM SUQADD intrinsic for 4 x i16 lanes;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    // Direct binding to the LLVM SUQADD intrinsic for 8 x i16 lanes;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    // Direct binding to the LLVM SUQADD intrinsic for 2 x i32 lanes;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    // Direct binding to the LLVM SUQADD intrinsic for 4 x i32 lanes;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    // Direct binding to the LLVM SUQADD intrinsic for 1 x i64 lane;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    // Direct binding to the LLVM SUQADD intrinsic for 2 x i64 lanes;
    // the wrapper only forwards its arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    // Scalar form: splat both scalars into 8-lane vectors, run the vector
    // SUQADD, then extract lane 0 of the result.
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    // Scalar form: splat both scalars into 4-lane vectors, run the vector
    // SUQADD, then extract lane 0 of the result.
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    // Scalar (doubleword) form bound directly to the scalar i64 LLVM
    // SUQADD intrinsic — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    // Scalar (single-word) form bound directly to the scalar i32 LLVM
    // SUQADD intrinsic — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    unsafe { _vuqadds_s32(a, b) }
}
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
    // LANE selects one of the four 32-bit groups (4 x i8) of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // View `c` as four 32-bit lanes, broadcast the chosen lane into
        // both result positions, then reinterpret back for the dot product.
        let c: int32x4_t = transmute(c);
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
    // LANE selects one of the four 32-bit groups (4 x i8) of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // View `c` as four 32-bit lanes, broadcast the chosen lane into
        // all four result positions, then reinterpret back for the dot product.
        let c: int32x4_t = transmute(c);
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, b, transmute(c))
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // UZP1 on 2-lane vectors takes lane 0 of each input; this coincides
    // with ZIP1, which is why that instruction is asserted above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // UZP1 on 2-lane vectors takes lane 0 of each input; this coincides
    // with ZIP1, which is why that instruction is asserted above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // UZP1 on 2-lane vectors takes lane 0 of each input; this coincides
    // with ZIP1, which is why that instruction is asserted above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // UZP1 on 2-lane vectors takes lane 0 of each input; this coincides
    // with ZIP1, which is why that instruction is asserted above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // UZP1 on 2-lane vectors takes lane 0 of each input; this coincides
    // with ZIP1, which is why that instruction is asserted above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // UZP1 on 2-lane vectors takes lane 0 of each input; this coincides
    // with ZIP1, which is why that instruction is asserted above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // UZP1 on 2-lane vectors takes lane 0 of each input; this coincides
    // with ZIP1, which is why that instruction is asserted above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP1: the even-indexed lanes of the concatenation `a:b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Lane 1 of `a` then lane 1 of `b` (index 3 addresses `b`); on 2-lane
    // vectors UZP2 coincides with ZIP2, hence assert_instr(zip2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane 1 of `a` then lane 1 of `b` (index 3 addresses `b`); on 2-lane
    // vectors UZP2 coincides with ZIP2, hence assert_instr(zip2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Lane 1 of `a` then lane 1 of `b` (index 3 addresses `b`); on 2-lane
    // vectors UZP2 coincides with ZIP2, hence assert_instr(zip2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Lane 1 of `a` then lane 1 of `b` (index 3 addresses `b`); on 2-lane
    // vectors UZP2 coincides with ZIP2, hence assert_instr(zip2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Lane 1 of `a` then lane 1 of `b` (index 3 addresses `b`); on 2-lane
    // vectors UZP2 coincides with ZIP2, hence assert_instr(zip2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Lane 1 of `a` then lane 1 of `b` (index 3 addresses `b`); on 2-lane
    // vectors UZP2 coincides with ZIP2, hence assert_instr(zip2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Lane 1 of `a` then lane 1 of `b` (index 3 addresses `b`); on 2-lane
    // vectors UZP2 coincides with ZIP2, hence assert_instr(zip2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP2: odd-numbered lanes of `a` then of `b` (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // IMM6 must fit in 6 bits (0..=63); rejected at compile time otherwise.
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        // Binding to the LLVM intrinsic that lowers to the XAR instruction.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    // The LLVM intrinsic takes the rotate amount as a runtime i64 operand.
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP1: interleave the low halves: a[0], b[0], a[1], b[1] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[3], b[3] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP1 on 2-lane vectors: a[0], b[0] (index 2 addresses `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP1: interleave the low halves: a[0], b[0], a[1], b[1] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP1 on 2-lane vectors: a[0], b[0] (index 2 addresses `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[3], b[3] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[7], b[7] (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP1: interleave the low halves: a[0], b[0], a[1], b[1] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[3], b[3] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP1 on 2-lane vectors: a[0], b[0] (index 2 addresses `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP1: interleave the low halves: a[0], b[0], a[1], b[1] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP1 on 2-lane vectors: a[0], b[0] (index 2 addresses `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[3], b[3] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[7], b[7] (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP1: interleave the low halves: a[0], b[0], a[1], b[1] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[3], b[3] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP1 on 2-lane vectors: a[0], b[0] (index 2 addresses `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP1: interleave the low halves: a[0], b[0], a[1], b[1] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP1 on 2-lane vectors: a[0], b[0] (index 2 addresses `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[3], b[3] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[7], b[7] (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP1: interleave the low halves: a[0], b[0], a[1], b[1] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP1: interleave the low halves: a[0], b[0], ..., a[3], b[3] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP1 on 2-lane vectors: a[0], b[0] (index 2 addresses `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP2: interleave the high halves: a[2], b[2], a[3], b[3] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP2: interleave the high halves: a[4], b[4], ..., a[7], b[7] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP2 on 2-lane vectors: a[1], b[1] (index 3 addresses `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP2: interleave the high halves: a[2], b[2], a[3], b[3] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP2 on 2-lane vectors: a[1], b[1] (index 3 addresses `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP2: interleave the high halves: a[4], b[4], ..., a[7], b[7] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP2: interleave the high halves: a[8], b[8], ..., a[15], b[15] (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP2: interleave the high halves: a[2], b[2], a[3], b[3] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP2: interleave the high halves: a[4], b[4], ..., a[7], b[7] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP2 on 2-lane vectors: a[1], b[1] (index 3 addresses `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP2: interleave the high halves: a[2], b[2], a[3], b[3] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP2 on 2-lane vectors: a[1], b[1] (index 3 addresses `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP2: interleave the high halves: a[4], b[4], ..., a[7], b[7] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP2: interleave the high halves: a[8], b[8], ..., a[15], b[15] (indices 16..=31 address `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP2: interleave the high halves: a[2], b[2], a[3], b[3] (indices 4..=7 address `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP2: interleave the high halves: a[4], b[4], ..., a[7], b[7] (indices 8..=15 address `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
28367#[doc = "Zip vectors"]
28368#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
28369#[inline]
28370#[target_feature(enable = "neon")]
28371#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28372#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28373pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
28374    unsafe { simd_shuffle!(a, b, [1, 3]) }
28375}
28376#[doc = "Zip vectors"]
28377#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
28378#[inline]
28379#[target_feature(enable = "neon")]
28380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28381#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28382pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
28383    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
28384}
28385#[doc = "Zip vectors"]
28386#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
28387#[inline]
28388#[target_feature(enable = "neon")]
28389#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28390#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28391pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
28392    unsafe { simd_shuffle!(a, b, [1, 3]) }
28393}
28394#[doc = "Zip vectors"]
28395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
28396#[inline]
28397#[target_feature(enable = "neon")]
28398#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28399#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28400pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
28401    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
28402}
28403#[doc = "Zip vectors"]
28404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
28405#[inline]
28406#[target_feature(enable = "neon")]
28407#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28408#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28409pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
28410    unsafe {
28411        simd_shuffle!(
28412            a,
28413            b,
28414            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
28415        )
28416    }
28417}
28418#[doc = "Zip vectors"]
28419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
28420#[inline]
28421#[target_feature(enable = "neon")]
28422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28423#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28424pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
28425    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
28426}
28427#[doc = "Zip vectors"]
28428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
28429#[inline]
28430#[target_feature(enable = "neon")]
28431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28432#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28433pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28434    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
28435}
28436#[doc = "Zip vectors"]
28437#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
28438#[inline]
28439#[target_feature(enable = "neon")]
28440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28441#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28442pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
28443    unsafe { simd_shuffle!(a, b, [1, 3]) }
28444}