1#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // Binding for the LLVM intrinsic that lowers to CRC32CX; `extern
    // "unadjusted"` passes the arguments exactly as LLVM expects them.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `crc` target
    // feature, which `#[target_feature]` guarantees is enabled here.
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // Binding for the LLVM intrinsic that lowers to CRC32X; `extern
    // "unadjusted"` passes the arguments exactly as LLVM expects them.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `crc` target
    // feature, which `#[target_feature]` guarantees is enabled here.
    unsafe { ___crc32d(crc, data) }
}
#[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"]
#[inline(always)]
#[target_feature(enable = "jsconv")]
#[cfg_attr(test, assert_instr(fjcvtzs))]
#[stable(feature = "stdarch_aarch64_jscvt", since = "CURRENT_RUSTC_VERSION")]
pub fn __jcvt(a: f64) -> i32 {
    // Binding for the LLVM intrinsic that lowers to FJCVTZS (the
    // JavaScript-semantics f64 -> i32 conversion).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.fjcvtzs"
        )]
        fn ___jcvt(a: f64) -> i32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `jsconv` target
    // feature, which `#[target_feature]` guarantees is enabled here.
    unsafe { ___jcvt(a) }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    // SAFETY: only lane shuffles, casts, and lane-wise arithmetic on plain
    // SIMD values; no memory access.
    unsafe {
        // Take the high halves (lanes 8..16) of `b` and `c`.
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        // The absolute difference is non-negative; reinterpret it as unsigned
        // so the widening cast below zero-extends rather than sign-extends.
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // SAFETY: only lane shuffles, casts, and lane-wise arithmetic on plain
    // SIMD values; no memory access.
    unsafe {
        // Take the high halves (lanes 4..8) of `b` and `c`.
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        // The absolute difference is non-negative; reinterpret it as unsigned
        // so the widening cast below zero-extends rather than sign-extends.
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // SAFETY: only lane shuffles, casts, and lane-wise arithmetic on plain
    // SIMD values; no memory access.
    unsafe {
        // Take the high halves (lanes 2..4) of `b` and `c`.
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        // The absolute difference is non-negative; reinterpret it as unsigned
        // so the widening cast below zero-extends rather than sign-extends.
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    // SAFETY: only lane shuffles, casts, and lane-wise arithmetic on plain
    // SIMD values; no memory access.
    unsafe {
        // Take the high halves (lanes 8..16) of `b` and `c`, then widen the
        // absolute differences (zero-extension) and accumulate into `a`.
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    // SAFETY: only lane shuffles, casts, and lane-wise arithmetic on plain
    // SIMD values; no memory access.
    unsafe {
        // Take the high halves (lanes 4..8) of `b` and `c`, then widen the
        // absolute differences (zero-extension) and accumulate into `a`.
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    // SAFETY: only lane shuffles, casts, and lane-wise arithmetic on plain
    // SIMD values; no memory access.
    unsafe {
        // Take the high halves (lanes 2..4) of `b` and `c`, then widen the
        // absolute differences (zero-extension) and accumulate into `a`.
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Binding for the LLVM intrinsic that lowers to FABD on a 1 x f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Binding for the LLVM intrinsic that lowers to FABD on a 2 x f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form implemented on the vector unit: splat both scalars, take
    // the vector absolute difference, and extract lane 0.
    // SAFETY: `simd_extract!` with constant index 0 is always in bounds for
    // a 1-lane vector.
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar form implemented on the vector unit: splat both scalars, take
    // the vector absolute difference, and extract lane 0.
    // SAFETY: `simd_extract!` with constant index 0 is always in bounds.
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Scalar form implemented on the vector unit: splat both scalars, take
    // the vector absolute difference, and extract lane 0.
    // SAFETY: `simd_extract!` with constant index 0 is always in bounds.
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // SAFETY: only lane shuffles and casts on plain SIMD values.
    unsafe {
        // Take the high halves (lanes 4..8) of both inputs.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        // Reinterpret the non-negative absolute difference as unsigned so
        // the final widening cast zero-extends rather than sign-extends.
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // SAFETY: only lane shuffles and casts on plain SIMD values.
    unsafe {
        // Take the high halves (lanes 2..4) of both inputs.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        // Reinterpret the non-negative absolute difference as unsigned so
        // the final widening cast zero-extends rather than sign-extends.
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    // SAFETY: only lane shuffles and casts on plain SIMD values.
    unsafe {
        // Take the high halves (lanes 8..16) of both inputs.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Reinterpret the non-negative absolute difference as unsigned so
        // the final widening cast zero-extends rather than sign-extends.
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    // SAFETY: only lane shuffles and casts on plain SIMD values.
    unsafe {
        // Take the high halves (lanes 8..16), then widen the unsigned
        // absolute differences to 16 bits (zero-extension).
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // SAFETY: only lane shuffles and casts on plain SIMD values.
    unsafe {
        // Take the high halves (lanes 4..8), then widen the unsigned
        // absolute differences to 32 bits (zero-extension).
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // SAFETY: only lane shuffles and casts on plain SIMD values.
    unsafe {
        // Take the high halves (lanes 2..4), then widen the unsigned
        // absolute differences to 64 bits (zero-extension).
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise |x| on a plain SIMD value; no memory access.
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise |x| on a plain SIMD value; no memory access.
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    // SAFETY: lane-wise negation/compare/select on plain SIMD values.
    unsafe {
        // |x| computed as max(x, -x) per lane. `simd_neg` wraps, so
        // i64::MIN maps to itself — the "wrapping" behavior in the doc.
        let neg: int64x1_t = simd_neg(a);
        let mask: int64x1_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    // SAFETY: lane-wise negation/compare/select on plain SIMD values.
    unsafe {
        // |x| computed as max(x, -x) per lane. `simd_neg` wraps, so
        // i64::MIN maps to itself — the "wrapping" behavior in the doc.
        let neg: int64x2_t = simd_neg(a);
        let mask: int64x2_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    // Binding for the LLVM intrinsic that lowers to the scalar ABS
    // instruction on a 64-bit value.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vabsd_s64(a) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain scalar addition with two's-complement wraparound on overflow;
    // no dedicated SIMD instruction is needed (hence `assert_instr(nop)`).
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Plain scalar addition with wraparound on overflow; no dedicated SIMD
    // instruction is needed (hence `assert_instr(nop)`).
    a.wrapping_add(b)
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Binding for the LLVM intrinsic: widening (i16 -> i32) horizontal sum
    // of all four lanes, lowered to SADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // Binding for the LLVM intrinsic: widening (i16 -> i32) horizontal sum
    // of all eight lanes, lowered to SADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // Binding for the LLVM intrinsic: widening (i32 -> i64) horizontal sum
    // of all four lanes, lowered to SADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    // Binding for the LLVM intrinsic: widening (i32 -> i64) horizontal sum.
    // With only two lanes this lowers to a single pairwise add (SADDLP),
    // which is why the asserted instruction differs from the other forms.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // The LLVM intrinsic only comes in an i32-returning form; the sum of
    // eight i8 values is in [-1024, 1016], so truncating to i16 is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // The LLVM intrinsic only comes in an i32-returning form; the sum of
    // sixteen i8 values is in [-2048, 2032], so truncating to i16 is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Binding for the LLVM intrinsic: widening (u16 -> u32) horizontal sum
    // of all four lanes, lowered to UADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // Binding for the LLVM intrinsic: widening (u16 -> u32) horizontal sum
    // of all eight lanes, lowered to UADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // Binding for the LLVM intrinsic: widening (u32 -> u64) horizontal sum
    // of all four lanes, lowered to UADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    // Binding for the LLVM intrinsic: widening (u32 -> u64) horizontal sum.
    // With only two lanes this lowers to a single pairwise add (UADDLP),
    // which is why the asserted instruction differs from the other forms.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // The LLVM intrinsic is declared as returning i32; the sum of eight u8
    // values is at most 2040, so the cast to u16 is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // The LLVM intrinsic is declared as returning i32; the sum of sixteen u8
    // values is at most 4080, so the cast to u16 is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    // Binding for the LLVM intrinsic: horizontal f32 sum of both lanes,
    // lowered to a pairwise add (FADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    // Binding for the LLVM intrinsic: horizontal f32 sum of all four lanes,
    // lowered via pairwise adds (FADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    // Binding for the LLVM intrinsic: horizontal f64 sum of both lanes,
    // lowered to a pairwise add (FADDP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    // Two 64-bit lanes reduce with a single pairwise add (ADDP).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    // SAFETY: pure horizontal lane sum (initial value 0); no memory access.
    // Two 64-bit lanes reduce with a single pairwise add (ADDP).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Binding for the LLVM intrinsic that lowers to FAMAX (FEAT_FAMINMAX).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f16"
        )]
        fn _vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vamax_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Binding for the LLVM intrinsic that lowers to FAMAX (FEAT_FAMINMAX).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v8f16"
        )]
        fn _vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vamaxq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Binding for the LLVM intrinsic that lowers to FAMAX (FEAT_FAMINMAX).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: no preconditions beyond the target features enabled above.
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw LLVM intrinsic binding; `link_name` selects the AArch64 lowering (FAMAX).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw LLVM intrinsic binding; `link_name` selects the AArch64 lowering (FAMAX).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Raw LLVM intrinsic binding; `link_name` selects the AArch64 lowering (FAMIN).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f16"
        )]
        fn _vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon,faminmax` target features required by this intrinsic are
    // enabled by the `#[target_feature]` attribute above.
    unsafe { _vamin_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw LLVM intrinsic binding; `link_name` selects the AArch64 lowering (FAMIN).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v8f16"
        )]
        fn _vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vaminq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw LLVM intrinsic binding; `link_name` selects the AArch64 lowering (FAMIN).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw LLVM intrinsic binding; `link_name` selects the AArch64 lowering (FAMIN).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw LLVM intrinsic binding; `link_name` selects the AArch64 lowering (FAMIN).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vaminq_f64(a, b) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (signed variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (signed variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (signed variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (signed variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (unsigned variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (unsigned variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (unsigned variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Raw LLVM intrinsic binding for the SHA3 BCAX instruction (unsigned variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon,sha3` target features required by BCAX are enabled above.
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Raw LLVM intrinsic binding for FCADD with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon,fp16` (and, off 32-bit Arm, `fcma`) target features
    // required by FCADD are enabled by the attributes above.
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw LLVM intrinsic binding for FCADD with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: required target features are enabled by the attributes above.
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw LLVM intrinsic binding for FCADD with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon,fcma` target features required by FCADD are enabled above.
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw LLVM intrinsic binding for FCADD with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon,fcma` target features required by FCADD are enabled above.
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw LLVM intrinsic binding for FCADD with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon,fcma` target features required by FCADD are enabled above.
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Raw LLVM intrinsic binding for FCADD with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: required target features are enabled by the attributes above.
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw LLVM intrinsic binding for FCADD with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: required target features are enabled by the attributes above.
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw LLVM intrinsic binding for FCADD with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon,fcma` target features required by FCADD are enabled above.
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw LLVM intrinsic binding for FCADD with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon,fcma` target features required by FCADD are enabled above.
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw LLVM intrinsic binding for FCADD with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon,fcma` target features required by FCADD are enabled above.
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Raw LLVM intrinsic binding for FACGE; returns a per-lane mask vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` target feature required by FACGE is enabled above.
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Raw LLVM intrinsic binding for FACGE; returns a per-lane mask vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature required by FACGE is enabled above.
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    // Scalar FACGE: compares |a| >= |b| and returns a 64-bit mask value.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the `neon` target feature required by FACGE is enabled above.
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    // Scalar FACGE: compares |a| >= |b| and returns a 32-bit mask value.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the `neon` target feature required by FACGE is enabled above.
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    // The LLVM intrinsic yields the mask widened to i32 (note the ".i32.f16"
    // link name); it is truncated back to the 16-bit lane mask below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the `neon,fp16` target features required by FACGE (half precision)
    // are enabled above.
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Raw LLVM intrinsic binding for FACGT; returns a per-lane mask vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` target feature required by FACGT is enabled above.
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Raw LLVM intrinsic binding for FACGT; returns a per-lane mask vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature required by FACGT is enabled above.
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    // Scalar FACGT: compares |a| > |b| and returns a 64-bit mask value.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the `neon` target feature required by FACGT is enabled above.
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    // Scalar FACGT: compares |a| > |b| and returns a 32-bit mask value.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the `neon` target feature required by FACGT is enabled above.
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    // The LLVM intrinsic yields the mask widened to i32 (note the ".i32.f16"
    // link name); it is truncated back to the 16-bit lane mask below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the `neon,fp16` target features required by FACGT (half precision)
    // are enabled above.
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is computed as |b| >= |a| by swapping the operands of FACGE.
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| <= |b| is computed as |b| >= |a| by swapping the operands of FACGE.
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // |a| <= |b| is computed as |b| >= |a| by swapping the operands of FACGE.
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // |a| <= |b| is computed as |b| >= |a| by swapping the operands of FACGE.
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // |a| <= |b| is computed as |b| >= |a| by swapping the operands of FACGE.
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is computed as |b| > |a| by swapping the operands of FACGT.
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| < |b| is computed as |b| > |a| by swapping the operands of FACGT.
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    // |a| < |b| is computed as |b| > |a| by swapping the operands of FACGT.
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    // |a| < |b| is computed as |b| > |a| by swapping the operands of FACGT.
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    // |a| < |b| is computed as |b| > |a| by swapping the operands of FACGT.
    vcagth_f16(b, a)
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise equality via the portable `simd_eq` intrinsic; lowered to FCMEQ.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Lane-wise equality via the portable `simd_eq` intrinsic; lowered to FCMEQ.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Lane-wise equality via the portable `simd_eq` intrinsic; lowered to CMEQ.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // Lane-wise equality via the portable `simd_eq` intrinsic; lowered to CMEQ.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Lane-wise equality via the portable `simd_eq` intrinsic; lowered to CMEQ.
    unsafe { simd_eq(a, b) }
}
1490#[doc = "Compare bitwise Equal (vector)"]
1491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
1492#[inline(always)]
1493#[target_feature(enable = "neon")]
1494#[cfg_attr(test, assert_instr(cmeq))]
1495#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1496pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
1497 unsafe { simd_eq(a, b) }
1498}
1499#[doc = "Compare bitwise Equal (vector)"]
1500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
1501#[inline(always)]
1502#[target_feature(enable = "neon")]
1503#[cfg_attr(test, assert_instr(cmeq))]
1504#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1505pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
1506 unsafe { simd_eq(a, b) }
1507}
1508#[doc = "Compare bitwise Equal (vector)"]
1509#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
1510#[inline(always)]
1511#[target_feature(enable = "neon")]
1512#[cfg_attr(test, assert_instr(cmeq))]
1513#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1514pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
1515 unsafe { simd_eq(a, b) }
1516}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: duplicate each scalar into a 1-lane vector, compare, and
    // take lane 0 of the mask.
    // SAFETY: lane 0 always exists in the 1-lane comparison result.
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the 1-lane 64-bit vector types have identical size and layout.
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    // SAFETY: `u64` and `uint64x1_t` have identical size and layout.
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: the all-zeros `f16x4` has the same size and layout as `float16x4_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    // SAFETY: the all-zeros `f16x8` has the same size and layout as `float16x8_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the all-zeros `f32x2` has the same size and layout as `float32x2_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: the all-zeros `f32x4` has the same size and layout as `float32x4_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: a scalar `f64` has the same 64-bit size and layout as the
    // 1-lane vector `float64x1_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: the all-zeros `f64x2` has the same size and layout as `float64x2_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i8x8` has the same size and layout as `int8x8_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i8x16` has the same size and layout as `int8x16_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: the all-zeros `i16x4` has the same size and layout as `int16x4_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i16x8` has the same size and layout as `int16x8_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: the all-zeros `i32x2` has the same size and layout as `int32x2_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: the all-zeros `i32x4` has the same size and layout as `int32x4_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: the all-zeros `i64x1` has the same size and layout as `int64x1_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: the all-zeros `i64x2` has the same size and layout as `int64x2_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i8x8` has the same size and layout as `poly8x8_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i8x16` has the same size and layout as `poly8x16_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: the all-zeros `i64x1` has the same size and layout as `poly64x1_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: the all-zeros `i64x2` has the same size and layout as `poly64x2_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `u8x8` has the same size and layout as `uint8x8_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `u8x16` has the same size and layout as `uint8x16_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    // SAFETY: the all-zeros `u16x4` has the same size and layout as `uint16x4_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `u16x8` has the same size and layout as `uint16x8_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    // SAFETY: the all-zeros `u32x2` has the same size and layout as `uint32x2_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    // SAFETY: the all-zeros `u32x4` has the same size and layout as `uint32x4_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    // SAFETY: the all-zeros `u64x1` has the same size and layout as `uint64x1_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    // SAFETY: the all-zeros `u64x2` has the same size and layout as `uint64x2_t`.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the 1-lane 64-bit vector types have identical size and layout.
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    // SAFETY: `u64` and `uint64x1_t` have identical size and layout.
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzh_f16(a: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    // SAFETY: lane 0 always exists in the 1-lane comparison result.
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: both operands and the result are 1-lane 64-bit vectors, as `simd_ge` requires.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: both operands and the result are 2-lane 64-bit vectors, as `simd_ge` requires.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: both operands and the result are 1-lane 64-bit vectors, as `simd_ge` requires.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: both operands and the result are 2-lane 64-bit vectors, as `simd_ge` requires.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: both operands and the result are 1-lane 64-bit vectors, as `simd_ge` requires.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: both operands and the result are 2-lane 64-bit vectors, as `simd_ge` requires.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    // SAFETY: lane 0 always exists in the 1-lane comparison result.
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the 1-lane 64-bit vector types have identical size and layout.
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    // SAFETY: `u64` and `uint64x1_t` have identical size and layout.
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the all-zeros `f32x2` has the same size and layout as `float32x2_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: the all-zeros `f32x4` has the same size and layout as `float32x4_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: a scalar `f64` has the same 64-bit size and layout as the
    // 1-lane vector `float64x1_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: the all-zeros `f64x2` has the same size and layout as `float64x2_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i8x8` has the same size and layout as `int8x8_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i8x16` has the same size and layout as `int8x16_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: the all-zeros `i16x4` has the same size and layout as `int16x4_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zeros `i16x8` has the same size and layout as `int16x8_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: the all-zeros `i32x2` has the same size and layout as `int32x2_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: the all-zeros `i32x4` has the same size and layout as `int32x4_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: the all-zeros `i64x1` has the same size and layout as `int64x1_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: the all-zeros `i64x2` has the same size and layout as `int64x2_t`.
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    // SAFETY: lane 0 always exists in the 1-lane comparison result.
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the 1-lane 64-bit vector types have identical size and layout.
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgezh_f16(a: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated comparison result.
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: both operands and the result are 1-lane 64-bit vectors, as `simd_gt` requires.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: both operands and the result are 2-lane 64-bit vectors, as `simd_gt` requires.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: both operands and the result are 1-lane 64-bit vectors, as `simd_gt` requires.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: both operands and the result are 2-lane 64-bit vectors, as `simd_gt` requires.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: both operands and the result are 1-lane 64-bit vectors, as `simd_gt` requires.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: both operands and the result are 2-lane 64-bit vectors, as `simd_gt` requires.
    unsafe { simd_gt(a, b) }
}
2182#[doc = "Floating-point compare greater than"]
2183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
2184#[inline(always)]
2185#[target_feature(enable = "neon")]
2186#[cfg_attr(test, assert_instr(fcmp))]
2187#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2188pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
2189 unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
2190}
2191#[doc = "Floating-point compare greater than"]
2192#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
2193#[inline(always)]
2194#[target_feature(enable = "neon")]
2195#[cfg_attr(test, assert_instr(fcmp))]
2196#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2197pub fn vcgts_f32(a: f32, b: f32) -> u32 {
2198 unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
2199}
2200#[doc = "Compare greater than"]
2201#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
2202#[inline(always)]
2203#[target_feature(enable = "neon")]
2204#[cfg_attr(test, assert_instr(cmp))]
2205#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2206pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
2207 unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
2208}
2209#[doc = "Compare greater than"]
2210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
2211#[inline(always)]
2212#[target_feature(enable = "neon")]
2213#[cfg_attr(test, assert_instr(cmp))]
2214#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2215pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
2216 unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
2217}
2218#[doc = "Floating-point compare greater than"]
2219#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
2220#[inline(always)]
2221#[cfg_attr(test, assert_instr(fcmp))]
2222#[target_feature(enable = "neon,fp16")]
2223#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2224#[cfg(not(target_arch = "arm64ec"))]
2225pub fn vcgth_f16(a: f16, b: f16) -> u16 {
2226 unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
2227}
2228#[doc = "Floating-point compare greater than zero"]
2229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
2230#[inline(always)]
2231#[target_feature(enable = "neon")]
2232#[cfg_attr(test, assert_instr(fcmgt))]
2233#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2234pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
2235 let b: f32x2 = f32x2::new(0.0, 0.0);
2236 unsafe { simd_gt(a, transmute(b)) }
2237}
2238#[doc = "Floating-point compare greater than zero"]
2239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
2240#[inline(always)]
2241#[target_feature(enable = "neon")]
2242#[cfg_attr(test, assert_instr(fcmgt))]
2243#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2244pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
2245 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
2246 unsafe { simd_gt(a, transmute(b)) }
2247}
2248#[doc = "Floating-point compare greater than zero"]
2249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
2250#[inline(always)]
2251#[target_feature(enable = "neon")]
2252#[cfg_attr(test, assert_instr(fcmgt))]
2253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2254pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
2255 let b: f64 = 0.0;
2256 unsafe { simd_gt(a, transmute(b)) }
2257}
2258#[doc = "Floating-point compare greater than zero"]
2259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
2260#[inline(always)]
2261#[target_feature(enable = "neon")]
2262#[cfg_attr(test, assert_instr(fcmgt))]
2263#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2264pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
2265 let b: f64x2 = f64x2::new(0.0, 0.0);
2266 unsafe { simd_gt(a, transmute(b)) }
2267}
2268#[doc = "Compare signed greater than zero"]
2269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
2270#[inline(always)]
2271#[target_feature(enable = "neon")]
2272#[cfg_attr(test, assert_instr(cmgt))]
2273#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2274pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
2275 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2276 unsafe { simd_gt(a, transmute(b)) }
2277}
2278#[doc = "Compare signed greater than zero"]
2279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
2280#[inline(always)]
2281#[target_feature(enable = "neon")]
2282#[cfg_attr(test, assert_instr(cmgt))]
2283#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2284pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
2285 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2286 unsafe { simd_gt(a, transmute(b)) }
2287}
2288#[doc = "Compare signed greater than zero"]
2289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
2290#[inline(always)]
2291#[target_feature(enable = "neon")]
2292#[cfg_attr(test, assert_instr(cmgt))]
2293#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2294pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
2295 let b: i16x4 = i16x4::new(0, 0, 0, 0);
2296 unsafe { simd_gt(a, transmute(b)) }
2297}
2298#[doc = "Compare signed greater than zero"]
2299#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
2300#[inline(always)]
2301#[target_feature(enable = "neon")]
2302#[cfg_attr(test, assert_instr(cmgt))]
2303#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2304pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
2305 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2306 unsafe { simd_gt(a, transmute(b)) }
2307}
2308#[doc = "Compare signed greater than zero"]
2309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
2310#[inline(always)]
2311#[target_feature(enable = "neon")]
2312#[cfg_attr(test, assert_instr(cmgt))]
2313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2314pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
2315 let b: i32x2 = i32x2::new(0, 0);
2316 unsafe { simd_gt(a, transmute(b)) }
2317}
2318#[doc = "Compare signed greater than zero"]
2319#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
2320#[inline(always)]
2321#[target_feature(enable = "neon")]
2322#[cfg_attr(test, assert_instr(cmgt))]
2323#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2324pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
2325 let b: i32x4 = i32x4::new(0, 0, 0, 0);
2326 unsafe { simd_gt(a, transmute(b)) }
2327}
2328#[doc = "Compare signed greater than zero"]
2329#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
2330#[inline(always)]
2331#[target_feature(enable = "neon")]
2332#[cfg_attr(test, assert_instr(cmgt))]
2333#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2334pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
2335 let b: i64x1 = i64x1::new(0);
2336 unsafe { simd_gt(a, transmute(b)) }
2337}
2338#[doc = "Compare signed greater than zero"]
2339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
2340#[inline(always)]
2341#[target_feature(enable = "neon")]
2342#[cfg_attr(test, assert_instr(cmgt))]
2343#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2344pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
2345 let b: i64x2 = i64x2::new(0, 0);
2346 unsafe { simd_gt(a, transmute(b)) }
2347}
2348#[doc = "Floating-point compare greater than zero"]
2349#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
2350#[inline(always)]
2351#[target_feature(enable = "neon")]
2352#[cfg_attr(test, assert_instr(fcmp))]
2353#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2354pub fn vcgtzd_f64(a: f64) -> u64 {
2355 unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
2356}
2357#[doc = "Floating-point compare greater than zero"]
2358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
2359#[inline(always)]
2360#[target_feature(enable = "neon")]
2361#[cfg_attr(test, assert_instr(fcmp))]
2362#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2363pub fn vcgtzs_f32(a: f32) -> u32 {
2364 unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
2365}
2366#[doc = "Compare signed greater than zero"]
2367#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
2368#[inline(always)]
2369#[target_feature(enable = "neon")]
2370#[cfg_attr(test, assert_instr(cmp))]
2371#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2372pub fn vcgtzd_s64(a: i64) -> u64 {
2373 unsafe { transmute(vcgtz_s64(transmute(a))) }
2374}
2375#[doc = "Floating-point compare greater than zero"]
2376#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
2377#[inline(always)]
2378#[cfg_attr(test, assert_instr(fcmp))]
2379#[target_feature(enable = "neon,fp16")]
2380#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2381#[cfg(not(target_arch = "arm64ec"))]
2382pub fn vcgtzh_f16(a: f16) -> u16 {
2383 unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
2384}
2385#[doc = "Floating-point compare less than or equal"]
2386#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
2387#[inline(always)]
2388#[target_feature(enable = "neon")]
2389#[cfg_attr(test, assert_instr(fcmge))]
2390#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2391pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
2392 unsafe { simd_le(a, b) }
2393}
2394#[doc = "Floating-point compare less than or equal"]
2395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
2396#[inline(always)]
2397#[target_feature(enable = "neon")]
2398#[cfg_attr(test, assert_instr(fcmge))]
2399#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2400pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
2401 unsafe { simd_le(a, b) }
2402}
2403#[doc = "Compare signed less than or equal"]
2404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
2405#[inline(always)]
2406#[target_feature(enable = "neon")]
2407#[cfg_attr(test, assert_instr(cmge))]
2408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2409pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
2410 unsafe { simd_le(a, b) }
2411}
2412#[doc = "Compare signed less than or equal"]
2413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
2414#[inline(always)]
2415#[target_feature(enable = "neon")]
2416#[cfg_attr(test, assert_instr(cmge))]
2417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2418pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
2419 unsafe { simd_le(a, b) }
2420}
2421#[doc = "Compare unsigned less than or equal"]
2422#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
2423#[inline(always)]
2424#[target_feature(enable = "neon")]
2425#[cfg_attr(test, assert_instr(cmhs))]
2426#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2427pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
2428 unsafe { simd_le(a, b) }
2429}
2430#[doc = "Compare unsigned less than or equal"]
2431#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
2432#[inline(always)]
2433#[target_feature(enable = "neon")]
2434#[cfg_attr(test, assert_instr(cmhs))]
2435#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2436pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
2437 unsafe { simd_le(a, b) }
2438}
2439#[doc = "Floating-point compare less than or equal"]
2440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
2441#[inline(always)]
2442#[target_feature(enable = "neon")]
2443#[cfg_attr(test, assert_instr(fcmp))]
2444#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2445pub fn vcled_f64(a: f64, b: f64) -> u64 {
2446 unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
2447}
2448#[doc = "Floating-point compare less than or equal"]
2449#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
2450#[inline(always)]
2451#[target_feature(enable = "neon")]
2452#[cfg_attr(test, assert_instr(fcmp))]
2453#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2454pub fn vcles_f32(a: f32, b: f32) -> u32 {
2455 unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
2456}
2457#[doc = "Compare less than or equal"]
2458#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
2459#[inline(always)]
2460#[target_feature(enable = "neon")]
2461#[cfg_attr(test, assert_instr(cmp))]
2462#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2463pub fn vcled_u64(a: u64, b: u64) -> u64 {
2464 unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
2465}
2466#[doc = "Compare less than or equal"]
2467#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
2468#[inline(always)]
2469#[target_feature(enable = "neon")]
2470#[cfg_attr(test, assert_instr(cmp))]
2471#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2472pub fn vcled_s64(a: i64, b: i64) -> u64 {
2473 unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
2474}
2475#[doc = "Floating-point compare less than or equal"]
2476#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
2477#[inline(always)]
2478#[cfg_attr(test, assert_instr(fcmp))]
2479#[target_feature(enable = "neon,fp16")]
2480#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2481#[cfg(not(target_arch = "arm64ec"))]
2482pub fn vcleh_f16(a: f16, b: f16) -> u16 {
2483 unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
2484}
2485#[doc = "Floating-point compare less than or equal to zero"]
2486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
2487#[inline(always)]
2488#[target_feature(enable = "neon")]
2489#[cfg_attr(test, assert_instr(fcmle))]
2490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2491pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
2492 let b: f32x2 = f32x2::new(0.0, 0.0);
2493 unsafe { simd_le(a, transmute(b)) }
2494}
2495#[doc = "Floating-point compare less than or equal to zero"]
2496#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
2497#[inline(always)]
2498#[target_feature(enable = "neon")]
2499#[cfg_attr(test, assert_instr(fcmle))]
2500#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2501pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
2502 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
2503 unsafe { simd_le(a, transmute(b)) }
2504}
2505#[doc = "Floating-point compare less than or equal to zero"]
2506#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
2507#[inline(always)]
2508#[target_feature(enable = "neon")]
2509#[cfg_attr(test, assert_instr(fcmle))]
2510#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2511pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
2512 let b: f64 = 0.0;
2513 unsafe { simd_le(a, transmute(b)) }
2514}
2515#[doc = "Floating-point compare less than or equal to zero"]
2516#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
2517#[inline(always)]
2518#[target_feature(enable = "neon")]
2519#[cfg_attr(test, assert_instr(fcmle))]
2520#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2521pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
2522 let b: f64x2 = f64x2::new(0.0, 0.0);
2523 unsafe { simd_le(a, transmute(b)) }
2524}
2525#[doc = "Compare signed less than or equal to zero"]
2526#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
2527#[inline(always)]
2528#[target_feature(enable = "neon")]
2529#[cfg_attr(test, assert_instr(cmle))]
2530#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2531pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
2532 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2533 unsafe { simd_le(a, transmute(b)) }
2534}
2535#[doc = "Compare signed less than or equal to zero"]
2536#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
2537#[inline(always)]
2538#[target_feature(enable = "neon")]
2539#[cfg_attr(test, assert_instr(cmle))]
2540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2541pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
2542 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2543 unsafe { simd_le(a, transmute(b)) }
2544}
2545#[doc = "Compare signed less than or equal to zero"]
2546#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
2547#[inline(always)]
2548#[target_feature(enable = "neon")]
2549#[cfg_attr(test, assert_instr(cmle))]
2550#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2551pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
2552 let b: i16x4 = i16x4::new(0, 0, 0, 0);
2553 unsafe { simd_le(a, transmute(b)) }
2554}
2555#[doc = "Compare signed less than or equal to zero"]
2556#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
2557#[inline(always)]
2558#[target_feature(enable = "neon")]
2559#[cfg_attr(test, assert_instr(cmle))]
2560#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2561pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
2562 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2563 unsafe { simd_le(a, transmute(b)) }
2564}
2565#[doc = "Compare signed less than or equal to zero"]
2566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
2567#[inline(always)]
2568#[target_feature(enable = "neon")]
2569#[cfg_attr(test, assert_instr(cmle))]
2570#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2571pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
2572 let b: i32x2 = i32x2::new(0, 0);
2573 unsafe { simd_le(a, transmute(b)) }
2574}
2575#[doc = "Compare signed less than or equal to zero"]
2576#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
2577#[inline(always)]
2578#[target_feature(enable = "neon")]
2579#[cfg_attr(test, assert_instr(cmle))]
2580#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2581pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
2582 let b: i32x4 = i32x4::new(0, 0, 0, 0);
2583 unsafe { simd_le(a, transmute(b)) }
2584}
2585#[doc = "Compare signed less than or equal to zero"]
2586#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
2587#[inline(always)]
2588#[target_feature(enable = "neon")]
2589#[cfg_attr(test, assert_instr(cmle))]
2590#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2591pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
2592 let b: i64x1 = i64x1::new(0);
2593 unsafe { simd_le(a, transmute(b)) }
2594}
2595#[doc = "Compare signed less than or equal to zero"]
2596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
2597#[inline(always)]
2598#[target_feature(enable = "neon")]
2599#[cfg_attr(test, assert_instr(cmle))]
2600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2601pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
2602 let b: i64x2 = i64x2::new(0, 0);
2603 unsafe { simd_le(a, transmute(b)) }
2604}
2605#[doc = "Floating-point compare less than or equal to zero"]
2606#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
2607#[inline(always)]
2608#[target_feature(enable = "neon")]
2609#[cfg_attr(test, assert_instr(fcmp))]
2610#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2611pub fn vclezd_f64(a: f64) -> u64 {
2612 unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
2613}
2614#[doc = "Floating-point compare less than or equal to zero"]
2615#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
2616#[inline(always)]
2617#[target_feature(enable = "neon")]
2618#[cfg_attr(test, assert_instr(fcmp))]
2619#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2620pub fn vclezs_f32(a: f32) -> u32 {
2621 unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
2622}
2623#[doc = "Compare less than or equal to zero"]
2624#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
2625#[inline(always)]
2626#[target_feature(enable = "neon")]
2627#[cfg_attr(test, assert_instr(cmp))]
2628#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2629pub fn vclezd_s64(a: i64) -> u64 {
2630 unsafe { transmute(vclez_s64(transmute(a))) }
2631}
2632#[doc = "Floating-point compare less than or equal to zero"]
2633#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
2634#[inline(always)]
2635#[cfg_attr(test, assert_instr(fcmp))]
2636#[target_feature(enable = "neon,fp16")]
2637#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2638#[cfg(not(target_arch = "arm64ec"))]
2639pub fn vclezh_f16(a: f16) -> u16 {
2640 unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
2641}
2642#[doc = "Floating-point compare less than"]
2643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
2644#[inline(always)]
2645#[target_feature(enable = "neon")]
2646#[cfg_attr(test, assert_instr(fcmgt))]
2647#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2648pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
2649 unsafe { simd_lt(a, b) }
2650}
2651#[doc = "Floating-point compare less than"]
2652#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
2653#[inline(always)]
2654#[target_feature(enable = "neon")]
2655#[cfg_attr(test, assert_instr(fcmgt))]
2656#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2657pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
2658 unsafe { simd_lt(a, b) }
2659}
2660#[doc = "Compare signed less than"]
2661#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
2662#[inline(always)]
2663#[target_feature(enable = "neon")]
2664#[cfg_attr(test, assert_instr(cmgt))]
2665#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2666pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
2667 unsafe { simd_lt(a, b) }
2668}
2669#[doc = "Compare signed less than"]
2670#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
2671#[inline(always)]
2672#[target_feature(enable = "neon")]
2673#[cfg_attr(test, assert_instr(cmgt))]
2674#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2675pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
2676 unsafe { simd_lt(a, b) }
2677}
2678#[doc = "Compare unsigned less than"]
2679#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
2680#[inline(always)]
2681#[target_feature(enable = "neon")]
2682#[cfg_attr(test, assert_instr(cmhi))]
2683#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2684pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
2685 unsafe { simd_lt(a, b) }
2686}
2687#[doc = "Compare unsigned less than"]
2688#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
2689#[inline(always)]
2690#[target_feature(enable = "neon")]
2691#[cfg_attr(test, assert_instr(cmhi))]
2692#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2693pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
2694 unsafe { simd_lt(a, b) }
2695}
2696#[doc = "Compare less than"]
2697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
2698#[inline(always)]
2699#[target_feature(enable = "neon")]
2700#[cfg_attr(test, assert_instr(cmp))]
2701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2702pub fn vcltd_u64(a: u64, b: u64) -> u64 {
2703 unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
2704}
2705#[doc = "Compare less than"]
2706#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
2707#[inline(always)]
2708#[target_feature(enable = "neon")]
2709#[cfg_attr(test, assert_instr(cmp))]
2710#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2711pub fn vcltd_s64(a: i64, b: i64) -> u64 {
2712 unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
2713}
2714#[doc = "Floating-point compare less than"]
2715#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
2716#[inline(always)]
2717#[cfg_attr(test, assert_instr(fcmp))]
2718#[target_feature(enable = "neon,fp16")]
2719#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2720#[cfg(not(target_arch = "arm64ec"))]
2721pub fn vclth_f16(a: f16, b: f16) -> u16 {
2722 unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
2723}
2724#[doc = "Floating-point compare less than"]
2725#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
2726#[inline(always)]
2727#[target_feature(enable = "neon")]
2728#[cfg_attr(test, assert_instr(fcmp))]
2729#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2730pub fn vclts_f32(a: f32, b: f32) -> u32 {
2731 unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
2732}
2733#[doc = "Floating-point compare less than"]
2734#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
2735#[inline(always)]
2736#[target_feature(enable = "neon")]
2737#[cfg_attr(test, assert_instr(fcmp))]
2738#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2739pub fn vcltd_f64(a: f64, b: f64) -> u64 {
2740 unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
2741}
2742#[doc = "Floating-point compare less than zero"]
2743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
2744#[inline(always)]
2745#[target_feature(enable = "neon")]
2746#[cfg_attr(test, assert_instr(fcmlt))]
2747#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2748pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
2749 let b: f32x2 = f32x2::new(0.0, 0.0);
2750 unsafe { simd_lt(a, transmute(b)) }
2751}
2752#[doc = "Floating-point compare less than zero"]
2753#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
2754#[inline(always)]
2755#[target_feature(enable = "neon")]
2756#[cfg_attr(test, assert_instr(fcmlt))]
2757#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2758pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
2759 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
2760 unsafe { simd_lt(a, transmute(b)) }
2761}
2762#[doc = "Floating-point compare less than zero"]
2763#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
2764#[inline(always)]
2765#[target_feature(enable = "neon")]
2766#[cfg_attr(test, assert_instr(fcmlt))]
2767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2768pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
2769 let b: f64 = 0.0;
2770 unsafe { simd_lt(a, transmute(b)) }
2771}
2772#[doc = "Floating-point compare less than zero"]
2773#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
2774#[inline(always)]
2775#[target_feature(enable = "neon")]
2776#[cfg_attr(test, assert_instr(fcmlt))]
2777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2778pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
2779 let b: f64x2 = f64x2::new(0.0, 0.0);
2780 unsafe { simd_lt(a, transmute(b)) }
2781}
2782#[doc = "Compare signed less than zero"]
2783#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
2784#[inline(always)]
2785#[target_feature(enable = "neon")]
2786#[cfg_attr(test, assert_instr(cmlt))]
2787#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2788pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
2789 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2790 unsafe { simd_lt(a, transmute(b)) }
2791}
2792#[doc = "Compare signed less than zero"]
2793#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
2794#[inline(always)]
2795#[target_feature(enable = "neon")]
2796#[cfg_attr(test, assert_instr(cmlt))]
2797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2798pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
2799 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2800 unsafe { simd_lt(a, transmute(b)) }
2801}
2802#[doc = "Compare signed less than zero"]
2803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
2804#[inline(always)]
2805#[target_feature(enable = "neon")]
2806#[cfg_attr(test, assert_instr(cmlt))]
2807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2808pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
2809 let b: i16x4 = i16x4::new(0, 0, 0, 0);
2810 unsafe { simd_lt(a, transmute(b)) }
2811}
2812#[doc = "Compare signed less than zero"]
2813#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
2814#[inline(always)]
2815#[target_feature(enable = "neon")]
2816#[cfg_attr(test, assert_instr(cmlt))]
2817#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2818pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
2819 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2820 unsafe { simd_lt(a, transmute(b)) }
2821}
2822#[doc = "Compare signed less than zero"]
2823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
2824#[inline(always)]
2825#[target_feature(enable = "neon")]
2826#[cfg_attr(test, assert_instr(cmlt))]
2827#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2828pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
2829 let b: i32x2 = i32x2::new(0, 0);
2830 unsafe { simd_lt(a, transmute(b)) }
2831}
2832#[doc = "Compare signed less than zero"]
2833#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
2834#[inline(always)]
2835#[target_feature(enable = "neon")]
2836#[cfg_attr(test, assert_instr(cmlt))]
2837#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2838pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
2839 let b: i32x4 = i32x4::new(0, 0, 0, 0);
2840 unsafe { simd_lt(a, transmute(b)) }
2841}
2842#[doc = "Compare signed less than zero"]
2843#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
2844#[inline(always)]
2845#[target_feature(enable = "neon")]
2846#[cfg_attr(test, assert_instr(cmlt))]
2847#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2848pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
2849 let b: i64x1 = i64x1::new(0);
2850 unsafe { simd_lt(a, transmute(b)) }
2851}
2852#[doc = "Compare signed less than zero"]
2853#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
2854#[inline(always)]
2855#[target_feature(enable = "neon")]
2856#[cfg_attr(test, assert_instr(cmlt))]
2857#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2858pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
2859 let b: i64x2 = i64x2::new(0, 0);
2860 unsafe { simd_lt(a, transmute(b)) }
2861}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    // Scalar form: broadcast `a` into a one-lane vector, run the vector
    // compare, then pull the mask back out of lane 0.
    // SAFETY: index 0 is always in bounds for a one-lane vector.
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    // Scalar form: broadcast `a` into a vector, run the vector compare, then
    // pull the mask back out of lane 0.
    // SAFETY: index 0 is always in bounds.
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    // Scalar form via the one-lane vector compare; the all-ones/all-zeros mask
    // is reinterpreted back to u64 (lowers to an arithmetic shift, see
    // assert_instr above).
    // SAFETY: i64, int64x1_t, uint64x1_t and u64 are all 64 bits wide.
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcltzh_f16(a: f16) -> u16 {
    // Scalar form: broadcast `a` into a vector, run the vector compare, then
    // pull the mask back out of lane 0.
    // SAFETY: index 0 is always in bounds.
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding for the LLVM intrinsic (rotation 0) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the CPU features it
    // needs are guaranteed by the #[target_feature] attributes above.
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Binding for the LLVM intrinsic (rotation 0) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic accepts any input values; the CPU features it
    // needs are guaranteed by the #[target_feature] attributes above.
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding for the LLVM intrinsic (rotation 0) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Binding for the LLVM intrinsic (rotation 0) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Binding for the LLVM intrinsic (rotation 0) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across the
        // whole vector, then run the non-lane variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 8
        // lanes of the wider vector, then run the non-lane variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds exactly one complex (re, im) pair, so LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1); with LANE
        // fixed at 0 this keeps `c` unchanged but matches the family's shape.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // A float32x2_t holds exactly one complex (re, im) pair, so LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 4
        // lanes of the wider vector, then run the non-lane variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex (re, im) pairs, so LANE must fit in
    // 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow to the selected pair (lanes 2*LANE and 2*LANE+1), repeated to
        // fill the 64-bit result shape, then run the non-lane variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (re, im) pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 8
        // lanes, then run the non-lane variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex (re, im) pairs, so LANE must fit in
    // 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow to the selected pair (lanes 2*LANE and 2*LANE+1), then run
        // the non-lane variant.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 4
        // lanes, then run the non-lane variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding for the LLVM intrinsic (rotation 180) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Binding for the LLVM intrinsic (rotation 180) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding for the LLVM intrinsic (rotation 180) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Binding for the LLVM intrinsic (rotation 180) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Binding for the LLVM intrinsic (rotation 180) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1), then run
        // the non-lane rotation-180 variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 8
        // lanes of the wider vector, then run the non-lane rotation-180 variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds exactly one complex (re, im) pair, so LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 the shuffle keeps `c` unchanged; it is kept for
        // uniformity with the rest of the lane family.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // A float32x2_t holds exactly one complex (re, im) pair, so LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 4
        // lanes of the wider vector, then run the non-lane rotation-180 variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex (re, im) pairs, so LANE must fit in
    // 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow to the selected pair (lanes 2*LANE and 2*LANE+1), repeated to
        // fill the 64-bit shape, then run the non-lane rotation-180 variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (re, im) pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 8
        // lanes, then run the non-lane rotation-180 variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex (re, im) pairs, so LANE must fit in
    // 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow to the selected pair (lanes 2*LANE and 2*LANE+1), then run
        // the non-lane rotation-180 variant.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 4
        // lanes, then run the non-lane rotation-180 variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding for the LLVM intrinsic (rotation 270) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Binding for the LLVM intrinsic (rotation 270) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding for the LLVM intrinsic (rotation 270) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Binding for the LLVM intrinsic (rotation 270) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Binding for the LLVM intrinsic (rotation 270) that lowers to FCMLA.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the required CPU
    // features are guaranteed by #[target_feature] above.
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1), then run
        // the non-lane rotation-270 variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE and 2*LANE+1) across all 8
        // lanes of the wider vector, then run the non-lane rotation-270 variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds exactly one complex (re, im) pair, so LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 the shuffle keeps `c` unchanged; it is kept for
        // uniformity with the rest of the lane family.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit lane operand `c` holds a single complex pair: LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the single (re, im) pair of `c` into both complex pairs
        // of a 128-bit vector, then defer to the non-lane rot270 variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit lane operand `c` holds 4 complex pairs, so LANE is 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to a 64-bit vector containing two copies of the
        // selected pair (lanes 2*LANE, 2*LANE+1), then defer to the
        // non-lane rot270 variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex pairs, so LANE needs exactly 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) of `c` into
        // all four complex pairs, then defer to the non-lane rot270 variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit lane operand `c` holds 2 complex pairs, so LANE is 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected pair (lanes 2*LANE, 2*LANE+1), then
        // defer to the non-lane rot270 variant.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex pairs, so LANE needs exactly 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) of `c` into
        // both complex pairs, then defer to the non-lane rot270 variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
3793#[doc = "Floating-point complex multiply accumulate"]
3794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
3795#[inline(always)]
3796#[target_feature(enable = "neon,fcma")]
3797#[target_feature(enable = "neon,fp16")]
3798#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3799#[cfg(not(target_arch = "arm64ec"))]
3800#[cfg_attr(test, assert_instr(fcmla))]
3801pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
3802 unsafe extern "unadjusted" {
3803 #[cfg_attr(
3804 any(target_arch = "aarch64", target_arch = "arm64ec"),
3805 link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
3806 )]
3807 fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
3808 }
3809 unsafe { _vcmla_rot90_f16(a, b, c) }
3810}
3811#[doc = "Floating-point complex multiply accumulate"]
3812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
3813#[inline(always)]
3814#[target_feature(enable = "neon,fcma")]
3815#[target_feature(enable = "neon,fp16")]
3816#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3817#[cfg(not(target_arch = "arm64ec"))]
3818#[cfg_attr(test, assert_instr(fcmla))]
3819pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
3820 unsafe extern "unadjusted" {
3821 #[cfg_attr(
3822 any(target_arch = "aarch64", target_arch = "arm64ec"),
3823 link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
3824 )]
3825 fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
3826 }
3827 unsafe { _vcmlaq_rot90_f16(a, b, c) }
3828}
3829#[doc = "Floating-point complex multiply accumulate"]
3830#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
3831#[inline(always)]
3832#[target_feature(enable = "neon,fcma")]
3833#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3834#[cfg_attr(test, assert_instr(fcmla))]
3835pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
3836 unsafe extern "unadjusted" {
3837 #[cfg_attr(
3838 any(target_arch = "aarch64", target_arch = "arm64ec"),
3839 link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
3840 )]
3841 fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
3842 }
3843 unsafe { _vcmla_rot90_f32(a, b, c) }
3844}
3845#[doc = "Floating-point complex multiply accumulate"]
3846#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
3847#[inline(always)]
3848#[target_feature(enable = "neon,fcma")]
3849#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3850#[cfg_attr(test, assert_instr(fcmla))]
3851pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
3852 unsafe extern "unadjusted" {
3853 #[cfg_attr(
3854 any(target_arch = "aarch64", target_arch = "arm64ec"),
3855 link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
3856 )]
3857 fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
3858 }
3859 unsafe { _vcmlaq_rot90_f32(a, b, c) }
3860}
3861#[doc = "Floating-point complex multiply accumulate"]
3862#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
3863#[inline(always)]
3864#[target_feature(enable = "neon,fcma")]
3865#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3866#[cfg_attr(test, assert_instr(fcmla))]
3867pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
3868 unsafe extern "unadjusted" {
3869 #[cfg_attr(
3870 any(target_arch = "aarch64", target_arch = "arm64ec"),
3871 link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
3872 )]
3873 fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
3874 }
3875 unsafe { _vcmlaq_rot90_f64(a, b, c) }
3876}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE needs exactly 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Duplicate the selected pair (lanes 2*LANE, 2*LANE+1) across the
        // whole vector, then defer to the non-lane rot90 variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit lane operand `c` holds 2 complex pairs, so LANE is 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) of `c` into
        // all four complex pairs of a 128-bit vector, then defer to the
        // non-lane rot90 variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A 64-bit f32 vector holds only one complex pair, so LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE == 0 this shuffle is the identity; it is kept for
        // uniformity with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit lane operand `c` holds a single complex pair: LANE must be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Broadcast the single (re, im) pair of `c` into both complex pairs
        // of a 128-bit vector, then defer to the non-lane rot90 variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit lane operand `c` holds 4 complex pairs, so LANE is 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to a 64-bit vector containing two copies of the
        // selected pair (lanes 2*LANE, 2*LANE+1), then defer to the
        // non-lane rot90 variant.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex pairs, so LANE needs exactly 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) of `c` into
        // all four complex pairs, then defer to the non-lane rot90 variant.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit lane operand `c` holds 2 complex pairs, so LANE is 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected pair (lanes 2*LANE, 2*LANE+1), then
        // defer to the non-lane rot90 variant.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex pairs, so LANE needs exactly 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) of `c` into
        // both complex pairs, then defer to the non-lane rot90 variant.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`. One match arm
        // per destination lane because simd_shuffle! requires a constant
        // index array; `2 + LANE2` indexes into `b` (the second operand).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit, so 0 and 1 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`. One match arm
        // per destination lane because simd_shuffle! requires a constant
        // index array; `8 + LANE2` indexes into `b` (the second operand).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so arms 0..=7 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `4 + LANE2`
        // indexes into `b` (the second shuffle operand).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 is masked to 2 bits, so arms 0..=3 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `2 + LANE2`
        // indexes into `b` (the second shuffle operand).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit, so 0 and 1 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `8 + LANE2`
        // indexes into `b` (the second shuffle operand).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so arms 0..=7 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `4 + LANE2`
        // indexes into `b` (the second shuffle operand).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 is masked to 2 bits, so arms 0..=3 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `2 + LANE2`
        // indexes into `b` (the second shuffle operand).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 is masked to 1 bit, so 0 and 1 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `8 + LANE2`
        // indexes into `b` (the second shuffle operand).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so arms 0..=7 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `4 + LANE2`
        // indexes into `b` (the second shuffle operand).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 is masked to 2 bits, so arms 0..=3 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` (self-concatenation) so both shuffle operands have the same
    // lane count, as required by simd_shuffle!.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Result is 2 lanes: LANE1 taken from `b` (index 4 + LANE2), the
        // other kept from the original `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // LANE1 is masked to 1 bit, so 0 and 1 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` (self-concatenation) so both shuffle operands have the same
    // lane count, as required by simd_shuffle!.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Result is 8 lanes: LANE1 taken from `b` (index 16 + LANE2), the
        // rest kept from the original `a`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so arms 0..=7 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` (self-concatenation) so both shuffle operands have the same
    // lane count, as required by simd_shuffle!.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Result is 4 lanes: LANE1 taken from `b` (index 8 + LANE2), the
        // rest kept from the original `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // LANE1 is masked to 2 bits, so arms 0..=3 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` (self-concatenation) so both shuffle operands have the same
    // lane count, as required by simd_shuffle!.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Result is 2 lanes: LANE1 taken from `b` (index 4 + LANE2), the
        // other kept from the original `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // LANE1 is masked to 1 bit, so 0 and 1 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` (self-concatenation) so both shuffle operands have the same
    // lane count, as required by simd_shuffle!.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Result is 8 lanes: LANE1 taken from `b` (index 16 + LANE2), the
        // rest kept from the original `a`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // LANE1 is masked to 3 bits, so arms 0..=7 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` (self-concatenation) so both shuffle operands have the same
    // lane count, as required by simd_shuffle!.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Result is 4 lanes: LANE1 taken from `b` (index 8 + LANE2), the
        // rest kept from the original `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // LANE1 is masked to 2 bits, so arms 0..=3 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` (self-concatenation) so both shuffle operands have the same
    // lane count, as required by simd_shuffle!.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Result is 2 lanes: LANE1 taken from `b` (index 4 + LANE2), the
        // other kept from the original `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // LANE1 is masked to 1 bit, so 0 and 1 are exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    // Compile-time lane checks: LANE1 indexes the 8-lane destination (3 bits),
    // LANE2 indexes the 16-lane source (4 bits).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so both shuffle operands have the same lane count;
    // the duplicated upper half is never selected by the indices below.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..15 pick from `a`, 16..31 pick from `b`, so `16 + LANE2`
        // copies b[LANE2] into lane LANE1. One match arm per destination lane
        // keeps every index array a compile-time constant.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 can only be 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    // Compile-time lane checks: LANE1 indexes the 4-lane destination (2 bits),
    // LANE2 indexes the 8-lane source (3 bits).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so both shuffle operands have the same lane count;
    // the duplicated upper lanes are never selected by the indices below.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..7 pick from `a`, 8..15 pick from `b`, so `8 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 can only be 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    // Compile-time lane checks: LANE1 indexes the 4-lane destination (2 bits),
    // LANE2 indexes the 2-lane source (1 bit).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes so both shuffle operands have the same lane count;
    // the duplicated upper lanes are never selected by the indices below.
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..3 pick from `a`, 4..7 pick from `b`, so `4 + LANE2`
        // copies b[LANE2] into lane LANE1 while the rest of `a` is kept.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 can only be 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    // LANE1 indexes the 2-lane destination (1 bit); the source has a single
    // lane, so LANE2 must be exactly 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the single-lane `b` to 2 lanes so both shuffle operands match;
    // `2 + LANE2` (always 2) selects the original b[0].
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..1 pick from `a`, 2..3 pick from the widened `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 can only be 0 or 1.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    // LANE1 indexes the 2-lane destination (1 bit); the source has a single
    // lane, so LANE2 must be exactly 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the single-lane `b` to 2 lanes so both shuffle operands match;
    // `2 + LANE2` (always 2) selects the original b[0].
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..1 pick from `a`, 2..3 pick from the widened `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 can only be 0 or 1.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    // LANE1 indexes the 2-lane destination (1 bit); the source has a single
    // lane, so LANE2 must be exactly 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the single-lane `b` to 2 lanes so both shuffle operands match;
    // `2 + LANE2` (always 2) selects the original b[0].
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..1 pick from `a`, 2..3 pick from the widened `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 can only be 0 or 1.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    // LANE1 indexes the 2-lane destination (1 bit); the source has a single
    // lane, so LANE2 must be exactly 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the single-lane `b` to 2 lanes so both shuffle operands match;
    // `2 + LANE2` (always 2) selects the original b[0].
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..1 pick from `a`, 2..3 pick from the widened `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 can only be 0 or 1.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    // Compile-time lane checks: LANE1 indexes the 16-lane destination (4
    // bits), LANE2 indexes the 8-lane source (3 bits).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes so both shuffle operands have the same lane
    // count; the duplicated upper half is never selected by the indices below.
    let b: int8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..15 pick from `a`, 16..31 pick from `b`, so `16 + LANE2`
        // copies b[LANE2] into lane LANE1. One match arm per destination lane
        // keeps every index array the compile-time constant `simd_shuffle!`
        // requires.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            // SAFETY: LANE1 & 0b1111 can only be 0..=15.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    // Compile-time lane checks: LANE1 indexes the 8-lane destination (3 bits),
    // LANE2 indexes the 4-lane source (2 bits).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes so both shuffle operands have the same lane count;
    // the duplicated upper lanes are never selected by the indices below.
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..7 pick from `a`, 8..15 pick from `b`, so `8 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 can only be 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    // Compile-time lane checks: LANE1 indexes the 4-lane destination (2 bits),
    // LANE2 indexes the 2-lane source (1 bit).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes so both shuffle operands have the same lane count;
    // the duplicated upper lanes are never selected by the indices below.
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..3 pick from `a`, 4..7 pick from `b`, so `4 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 can only be 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x16_t,
    b: uint8x8_t,
) -> uint8x16_t {
    // Compile-time lane checks: LANE1 indexes the 16-lane destination (4
    // bits), LANE2 indexes the 8-lane source (3 bits).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes so both shuffle operands have the same lane
    // count; the duplicated upper half is never selected by the indices below.
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..15 pick from `a`, 16..31 pick from `b`, so `16 + LANE2`
        // copies b[LANE2] into lane LANE1. One match arm per destination lane
        // keeps every index array the compile-time constant `simd_shuffle!`
        // requires.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            // SAFETY: LANE1 & 0b1111 can only be 0..=15.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    // Compile-time lane checks: LANE1 indexes the 8-lane destination (3 bits),
    // LANE2 indexes the 4-lane source (2 bits).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes so both shuffle operands have the same lane count;
    // the duplicated upper lanes are never selected by the indices below.
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..7 pick from `a`, 8..15 pick from `b`, so `8 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 can only be 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    // Compile-time lane checks: LANE1 indexes the 4-lane destination (2 bits),
    // LANE2 indexes the 2-lane source (1 bit).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes so both shuffle operands have the same lane count;
    // the duplicated upper lanes are never selected by the indices below.
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..3 pick from `a`, 4..7 pick from `b`, so `4 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 can only be 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x8_t,
) -> poly8x16_t {
    // Compile-time lane checks: LANE1 indexes the 16-lane destination (4
    // bits), LANE2 indexes the 8-lane source (3 bits).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes so both shuffle operands have the same lane
    // count; the duplicated upper half is never selected by the indices below.
    let b: poly8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..15 pick from `a`, 16..31 pick from `b`, so `16 + LANE2`
        // copies b[LANE2] into lane LANE1. One match arm per destination lane
        // keeps every index array the compile-time constant `simd_shuffle!`
        // requires.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            // SAFETY: LANE1 & 0b1111 can only be 0..=15.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    // Compile-time lane checks: LANE1 indexes the 8-lane destination (3 bits),
    // LANE2 indexes the 4-lane source (2 bits).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes so both shuffle operands have the same lane count;
    // the duplicated upper lanes are never selected by the indices below.
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..7 pick from `a`, 8..15 pick from `b`, so `8 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 can only be 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    // Both vectors are 4 lanes wide, so both lane indices use 2 bits; no
    // widening shuffle is needed here.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Indices 0..3 pick from `a`, 4..7 pick from `b`, so `4 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 can only be 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    // Both vectors are 2 lanes wide, so both lane indices use 1 bit; no
    // widening shuffle is needed here.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Indices 0..1 pick from `a`, 2..3 pick from `b`, so `2 + LANE2`
        // copies b[LANE2] into lane LANE1 of the result.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 can only be 0 or 1.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
    a: int8x16_t,
    b: int8x16_t,
) -> int8x16_t {
    // Both vectors are 16 lanes wide, so both lane indices use 4 bits; no
    // widening shuffle is needed here.
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        // Indices 0..15 pick from `a`, 16..31 pick from `b`, so `16 + LANE2`
        // copies b[LANE2] into lane LANE1. One match arm per destination lane
        // keeps every index array the compile-time constant `simd_shuffle!`
        // requires.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            // SAFETY: LANE1 & 0b1111 can only be 0..=15.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    // Both lane indices must fit in 3 bits (0..=7 for 8 x i16 lanes).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`.
        // In the shuffle index space, 0..=7 selects from `a` and 8..=15
        // selects from `b`, hence `8 + LANE2`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    // Both lane indices must fit in 2 bits (0..=3 for 4 x i32 lanes).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 4..=7 address `b`, hence `4 + LANE2`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    // Both lane indices must fit in 1 bit (0..=1 for 2 x i64 lanes).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 2..=3 address `b`, hence `2 + LANE2`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=1.
            _ => unreachable_unchecked(),
        }
    }
}
6357#[doc = "Insert vector element from another vector element"]
6358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6359#[inline(always)]
6360#[target_feature(enable = "neon")]
6361#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6362#[rustc_legacy_const_generics(1, 3)]
6363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6364pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6365 a: uint8x16_t,
6366 b: uint8x16_t,
6367) -> uint8x16_t {
6368 static_assert_uimm_bits!(LANE1, 4);
6369 static_assert_uimm_bits!(LANE2, 4);
6370 unsafe {
6371 match LANE1 & 0b1111 {
6372 0 => simd_shuffle!(
6373 a,
6374 b,
6375 [
6376 16 + LANE2 as u32,
6377 1,
6378 2,
6379 3,
6380 4,
6381 5,
6382 6,
6383 7,
6384 8,
6385 9,
6386 10,
6387 11,
6388 12,
6389 13,
6390 14,
6391 15
6392 ]
6393 ),
6394 1 => simd_shuffle!(
6395 a,
6396 b,
6397 [
6398 0,
6399 16 + LANE2 as u32,
6400 2,
6401 3,
6402 4,
6403 5,
6404 6,
6405 7,
6406 8,
6407 9,
6408 10,
6409 11,
6410 12,
6411 13,
6412 14,
6413 15
6414 ]
6415 ),
6416 2 => simd_shuffle!(
6417 a,
6418 b,
6419 [
6420 0,
6421 1,
6422 16 + LANE2 as u32,
6423 3,
6424 4,
6425 5,
6426 6,
6427 7,
6428 8,
6429 9,
6430 10,
6431 11,
6432 12,
6433 13,
6434 14,
6435 15
6436 ]
6437 ),
6438 3 => simd_shuffle!(
6439 a,
6440 b,
6441 [
6442 0,
6443 1,
6444 2,
6445 16 + LANE2 as u32,
6446 4,
6447 5,
6448 6,
6449 7,
6450 8,
6451 9,
6452 10,
6453 11,
6454 12,
6455 13,
6456 14,
6457 15
6458 ]
6459 ),
6460 4 => simd_shuffle!(
6461 a,
6462 b,
6463 [
6464 0,
6465 1,
6466 2,
6467 3,
6468 16 + LANE2 as u32,
6469 5,
6470 6,
6471 7,
6472 8,
6473 9,
6474 10,
6475 11,
6476 12,
6477 13,
6478 14,
6479 15
6480 ]
6481 ),
6482 5 => simd_shuffle!(
6483 a,
6484 b,
6485 [
6486 0,
6487 1,
6488 2,
6489 3,
6490 4,
6491 16 + LANE2 as u32,
6492 6,
6493 7,
6494 8,
6495 9,
6496 10,
6497 11,
6498 12,
6499 13,
6500 14,
6501 15
6502 ]
6503 ),
6504 6 => simd_shuffle!(
6505 a,
6506 b,
6507 [
6508 0,
6509 1,
6510 2,
6511 3,
6512 4,
6513 5,
6514 16 + LANE2 as u32,
6515 7,
6516 8,
6517 9,
6518 10,
6519 11,
6520 12,
6521 13,
6522 14,
6523 15
6524 ]
6525 ),
6526 7 => simd_shuffle!(
6527 a,
6528 b,
6529 [
6530 0,
6531 1,
6532 2,
6533 3,
6534 4,
6535 5,
6536 6,
6537 16 + LANE2 as u32,
6538 8,
6539 9,
6540 10,
6541 11,
6542 12,
6543 13,
6544 14,
6545 15
6546 ]
6547 ),
6548 8 => simd_shuffle!(
6549 a,
6550 b,
6551 [
6552 0,
6553 1,
6554 2,
6555 3,
6556 4,
6557 5,
6558 6,
6559 7,
6560 16 + LANE2 as u32,
6561 9,
6562 10,
6563 11,
6564 12,
6565 13,
6566 14,
6567 15
6568 ]
6569 ),
6570 9 => simd_shuffle!(
6571 a,
6572 b,
6573 [
6574 0,
6575 1,
6576 2,
6577 3,
6578 4,
6579 5,
6580 6,
6581 7,
6582 8,
6583 16 + LANE2 as u32,
6584 10,
6585 11,
6586 12,
6587 13,
6588 14,
6589 15
6590 ]
6591 ),
6592 10 => simd_shuffle!(
6593 a,
6594 b,
6595 [
6596 0,
6597 1,
6598 2,
6599 3,
6600 4,
6601 5,
6602 6,
6603 7,
6604 8,
6605 9,
6606 16 + LANE2 as u32,
6607 11,
6608 12,
6609 13,
6610 14,
6611 15
6612 ]
6613 ),
6614 11 => simd_shuffle!(
6615 a,
6616 b,
6617 [
6618 0,
6619 1,
6620 2,
6621 3,
6622 4,
6623 5,
6624 6,
6625 7,
6626 8,
6627 9,
6628 10,
6629 16 + LANE2 as u32,
6630 12,
6631 13,
6632 14,
6633 15
6634 ]
6635 ),
6636 12 => simd_shuffle!(
6637 a,
6638 b,
6639 [
6640 0,
6641 1,
6642 2,
6643 3,
6644 4,
6645 5,
6646 6,
6647 7,
6648 8,
6649 9,
6650 10,
6651 11,
6652 16 + LANE2 as u32,
6653 13,
6654 14,
6655 15
6656 ]
6657 ),
6658 13 => simd_shuffle!(
6659 a,
6660 b,
6661 [
6662 0,
6663 1,
6664 2,
6665 3,
6666 4,
6667 5,
6668 6,
6669 7,
6670 8,
6671 9,
6672 10,
6673 11,
6674 12,
6675 16 + LANE2 as u32,
6676 14,
6677 15
6678 ]
6679 ),
6680 14 => simd_shuffle!(
6681 a,
6682 b,
6683 [
6684 0,
6685 1,
6686 2,
6687 3,
6688 4,
6689 5,
6690 6,
6691 7,
6692 8,
6693 9,
6694 10,
6695 11,
6696 12,
6697 13,
6698 16 + LANE2 as u32,
6699 15
6700 ]
6701 ),
6702 15 => simd_shuffle!(
6703 a,
6704 b,
6705 [
6706 0,
6707 1,
6708 2,
6709 3,
6710 4,
6711 5,
6712 6,
6713 7,
6714 8,
6715 9,
6716 10,
6717 11,
6718 12,
6719 13,
6720 14,
6721 16 + LANE2 as u32
6722 ]
6723 ),
6724 _ => unreachable_unchecked(),
6725 }
6726 }
6727}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    // Both lane indices must fit in 3 bits (0..=7 for 8 x u16 lanes).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 8..=15 address `b`, hence `8 + LANE2`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    // Both lane indices must fit in 2 bits (0..=3 for 4 x u32 lanes).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 4..=7 address `b`, hence `4 + LANE2`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    // Both lane indices must fit in 1 bit (0..=1 for 2 x u64 lanes).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 2..=3 address `b`, hence `2 + LANE2`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=1.
            _ => unreachable_unchecked(),
        }
    }
}
6799#[doc = "Insert vector element from another vector element"]
6800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6801#[inline(always)]
6802#[target_feature(enable = "neon")]
6803#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
6804#[rustc_legacy_const_generics(1, 3)]
6805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6806pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6807 a: poly8x16_t,
6808 b: poly8x16_t,
6809) -> poly8x16_t {
6810 static_assert_uimm_bits!(LANE1, 4);
6811 static_assert_uimm_bits!(LANE2, 4);
6812 unsafe {
6813 match LANE1 & 0b1111 {
6814 0 => simd_shuffle!(
6815 a,
6816 b,
6817 [
6818 16 + LANE2 as u32,
6819 1,
6820 2,
6821 3,
6822 4,
6823 5,
6824 6,
6825 7,
6826 8,
6827 9,
6828 10,
6829 11,
6830 12,
6831 13,
6832 14,
6833 15
6834 ]
6835 ),
6836 1 => simd_shuffle!(
6837 a,
6838 b,
6839 [
6840 0,
6841 16 + LANE2 as u32,
6842 2,
6843 3,
6844 4,
6845 5,
6846 6,
6847 7,
6848 8,
6849 9,
6850 10,
6851 11,
6852 12,
6853 13,
6854 14,
6855 15
6856 ]
6857 ),
6858 2 => simd_shuffle!(
6859 a,
6860 b,
6861 [
6862 0,
6863 1,
6864 16 + LANE2 as u32,
6865 3,
6866 4,
6867 5,
6868 6,
6869 7,
6870 8,
6871 9,
6872 10,
6873 11,
6874 12,
6875 13,
6876 14,
6877 15
6878 ]
6879 ),
6880 3 => simd_shuffle!(
6881 a,
6882 b,
6883 [
6884 0,
6885 1,
6886 2,
6887 16 + LANE2 as u32,
6888 4,
6889 5,
6890 6,
6891 7,
6892 8,
6893 9,
6894 10,
6895 11,
6896 12,
6897 13,
6898 14,
6899 15
6900 ]
6901 ),
6902 4 => simd_shuffle!(
6903 a,
6904 b,
6905 [
6906 0,
6907 1,
6908 2,
6909 3,
6910 16 + LANE2 as u32,
6911 5,
6912 6,
6913 7,
6914 8,
6915 9,
6916 10,
6917 11,
6918 12,
6919 13,
6920 14,
6921 15
6922 ]
6923 ),
6924 5 => simd_shuffle!(
6925 a,
6926 b,
6927 [
6928 0,
6929 1,
6930 2,
6931 3,
6932 4,
6933 16 + LANE2 as u32,
6934 6,
6935 7,
6936 8,
6937 9,
6938 10,
6939 11,
6940 12,
6941 13,
6942 14,
6943 15
6944 ]
6945 ),
6946 6 => simd_shuffle!(
6947 a,
6948 b,
6949 [
6950 0,
6951 1,
6952 2,
6953 3,
6954 4,
6955 5,
6956 16 + LANE2 as u32,
6957 7,
6958 8,
6959 9,
6960 10,
6961 11,
6962 12,
6963 13,
6964 14,
6965 15
6966 ]
6967 ),
6968 7 => simd_shuffle!(
6969 a,
6970 b,
6971 [
6972 0,
6973 1,
6974 2,
6975 3,
6976 4,
6977 5,
6978 6,
6979 16 + LANE2 as u32,
6980 8,
6981 9,
6982 10,
6983 11,
6984 12,
6985 13,
6986 14,
6987 15
6988 ]
6989 ),
6990 8 => simd_shuffle!(
6991 a,
6992 b,
6993 [
6994 0,
6995 1,
6996 2,
6997 3,
6998 4,
6999 5,
7000 6,
7001 7,
7002 16 + LANE2 as u32,
7003 9,
7004 10,
7005 11,
7006 12,
7007 13,
7008 14,
7009 15
7010 ]
7011 ),
7012 9 => simd_shuffle!(
7013 a,
7014 b,
7015 [
7016 0,
7017 1,
7018 2,
7019 3,
7020 4,
7021 5,
7022 6,
7023 7,
7024 8,
7025 16 + LANE2 as u32,
7026 10,
7027 11,
7028 12,
7029 13,
7030 14,
7031 15
7032 ]
7033 ),
7034 10 => simd_shuffle!(
7035 a,
7036 b,
7037 [
7038 0,
7039 1,
7040 2,
7041 3,
7042 4,
7043 5,
7044 6,
7045 7,
7046 8,
7047 9,
7048 16 + LANE2 as u32,
7049 11,
7050 12,
7051 13,
7052 14,
7053 15
7054 ]
7055 ),
7056 11 => simd_shuffle!(
7057 a,
7058 b,
7059 [
7060 0,
7061 1,
7062 2,
7063 3,
7064 4,
7065 5,
7066 6,
7067 7,
7068 8,
7069 9,
7070 10,
7071 16 + LANE2 as u32,
7072 12,
7073 13,
7074 14,
7075 15
7076 ]
7077 ),
7078 12 => simd_shuffle!(
7079 a,
7080 b,
7081 [
7082 0,
7083 1,
7084 2,
7085 3,
7086 4,
7087 5,
7088 6,
7089 7,
7090 8,
7091 9,
7092 10,
7093 11,
7094 16 + LANE2 as u32,
7095 13,
7096 14,
7097 15
7098 ]
7099 ),
7100 13 => simd_shuffle!(
7101 a,
7102 b,
7103 [
7104 0,
7105 1,
7106 2,
7107 3,
7108 4,
7109 5,
7110 6,
7111 7,
7112 8,
7113 9,
7114 10,
7115 11,
7116 12,
7117 16 + LANE2 as u32,
7118 14,
7119 15
7120 ]
7121 ),
7122 14 => simd_shuffle!(
7123 a,
7124 b,
7125 [
7126 0,
7127 1,
7128 2,
7129 3,
7130 4,
7131 5,
7132 6,
7133 7,
7134 8,
7135 9,
7136 10,
7137 11,
7138 12,
7139 13,
7140 16 + LANE2 as u32,
7141 15
7142 ]
7143 ),
7144 15 => simd_shuffle!(
7145 a,
7146 b,
7147 [
7148 0,
7149 1,
7150 2,
7151 3,
7152 4,
7153 5,
7154 6,
7155 7,
7156 8,
7157 9,
7158 10,
7159 11,
7160 12,
7161 13,
7162 14,
7163 16 + LANE2 as u32
7164 ]
7165 ),
7166 _ => unreachable_unchecked(),
7167 }
7168 }
7169}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    // Both lane indices must fit in 3 bits (0..=7 for 8 x p16 lanes).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 8..=15 address `b`, hence `8 + LANE2`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    // Both lane indices must fit in 1 bit (0..=1 for 2 x p64 lanes).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 2..=3 address `b`, hence `2 + LANE2`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: the mask above limits the discriminant to 0..=1.
            _ => unreachable_unchecked(),
        }
    }
}
7218#[doc = "Insert vector element from another vector element"]
7219#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
7220#[inline(always)]
7221#[target_feature(enable = "neon")]
7222#[cfg_attr(test, assert_instr(nop))]
7223#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7224pub fn vcreate_f64(a: u64) -> float64x1_t {
7225 unsafe { transmute(a) }
7226}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // Lane-wise narrowing f64 -> f32 conversion (selects FCVTN).
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // Lane-wise widening f32 -> f64 conversion (selects FCVTL); widening is
    // exact, so no rounding occurs.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // Lane-wise signed i64 -> f64 conversion (selects SCVTF).
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Lane-wise signed i64 -> f64 conversion on the full quad vector.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // Lane-wise unsigned u64 -> f64 conversion (selects UCVTF).
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Lane-wise unsigned u64 -> f64 conversion on the full quad vector.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    // Low half of the result is `a` unchanged; high half is `b` narrowed
    // from f32 to f16 (selects FCVTN2).
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    // Widens the upper four f16 lanes of `a` to f32 (selects FCVTL2).
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Low two lanes come from `a`; high two lanes are `b` narrowed from
    // f64 to f32 (selects FCVTN2).
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe {
        // Extract the upper two f32 lanes of `a`, then widen them to f64
        // (selects FCVTL2). Widening is exact, so no rounding occurs.
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N is the number of fractional bits of the fixed-point input;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // N is the number of fractional bits of the fixed-point input;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // N is the number of fractional bits of the fixed-point input;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // N is the number of fractional bits of the fixed-point input;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N is the number of fractional bits of the fixed-point result;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // N is the number of fractional bits of the fixed-point result;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // N is the number of fractional bits of the fixed-point result;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // N is the number of fractional bits of the fixed-point result;
    // the valid immediate range is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    // SAFETY: N was validated at compile time by the assertion above.
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        // `llvm.fptosi.sat` is the saturating form: out-of-range inputs
        // clamp to i64::MIN/i64::MAX instead of being UB, matching FCVTZS.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        // `llvm.fptosi.sat` is the saturating form: out-of-range inputs
        // clamp to i64::MIN/i64::MAX instead of being UB, matching FCVTZS.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        // `llvm.fptoui.sat` is the saturating form: out-of-range inputs
        // clamp to 0/u64::MAX instead of being UB, matching FCVTZU.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        // `llvm.fptoui.sat` is the saturating form: out-of-range inputs
        // clamp to 0/u64::MAX instead of being UB, matching FCVTZU.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        // Maps directly to the FCVTAS instruction via its LLVM intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        // Maps directly to the FCVTAS instruction via its LLVM intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    // AArch64 NEON intrinsic; lowers to FCVTAS on a 2-lane f32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    // AArch64 NEON intrinsic; lowers to FCVTAS on a 4-lane f32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    // AArch64 NEON intrinsic; lowers to FCVTAS on a 1-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    // AArch64 NEON intrinsic; lowers to FCVTAS on a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    // AArch64 NEON intrinsic; lowers to FCVTAU on a 4-lane f16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // AArch64 NEON intrinsic; lowers to FCVTAU on an 8-lane f16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    // AArch64 NEON intrinsic; lowers to FCVTAU on a 2-lane f32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // AArch64 NEON intrinsic; lowers to FCVTAU on a 4-lane f32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    // AArch64 NEON intrinsic; lowers to FCVTAU on a 1-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // AArch64 NEON intrinsic; lowers to FCVTAU on a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Delegates to the i32-result intrinsic and narrows the result.
    // NOTE(review): the i32 -> i16 `as` cast wraps rather than saturates for
    // results outside i16 range — presumably acceptable because the intended
    // semantics narrow to 16 bits; confirm against the FCVTAS spec.
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAS (f16 -> i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAS (f16 -> i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Delegates to the u32-result intrinsic and narrows the result.
    // NOTE(review): the u32 -> u16 `as` cast truncates rather than saturates
    // for results above u16::MAX — confirm against the FCVTAU spec.
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAU (f16 -> u32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAU (f16 -> u64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAS (f32 -> i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAS (f64 -> i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAU (f32 -> u32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    // AArch64 NEON scalar intrinsic; lowers to FCVTAU (f64 -> u64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtad_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // A plain int-to-float `as` cast compiles to the SCVTF instruction
    // (verified by the `assert_instr` attribute above).
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // A plain int-to-float `as` cast compiles to the SCVTF instruction
    // (verified by the `assert_instr` attribute above).
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // Int-to-float `as` cast; compiles to SCVTF per `assert_instr` above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // Int-to-float `as` cast; compiles to SCVTF per `assert_instr` above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // Int-to-float `as` cast; compiles to SCVTF per `assert_instr` above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // Unsigned int-to-float `as` cast; compiles to UCVTF per `assert_instr`.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // Unsigned int-to-float `as` cast; compiles to UCVTF per `assert_instr`.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // Unsigned int-to-float `as` cast; compiles to UCVTF per `assert_instr`.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Widen to i32 and delegate to the i32-input intrinsic (lossless widening).
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON fixed-to-float intrinsic; lowers to SCVTF with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON fixed-to-float intrinsic; lowers to SCVTF with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Widen to u32 and delegate to the u32-input intrinsic (lossless widening).
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON unsigned fixed-to-float intrinsic; lowers to UCVTF with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON unsigned fixed-to-float intrinsic; lowers to UCVTF with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegates to the i32-result intrinsic and narrows the result.
    // NOTE(review): the i32 -> i16 `as` cast wraps rather than saturates for
    // out-of-range results — confirm against the FCVTZS spec.
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON float-to-fixed intrinsic; lowers to FCVTZS with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON float-to-fixed intrinsic; lowers to FCVTZS with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegates to the u32-result intrinsic and narrows the result.
    // NOTE(review): the u32 -> u16 `as` cast truncates rather than saturates
    // for out-of-range results — confirm against the FCVTZU spec.
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON float-to-unsigned-fixed intrinsic; lowers to FCVTZU with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    // N is the number of fractional bits; must lie in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // AArch64 NEON float-to-unsigned-fixed intrinsic; lowers to FCVTZU with #N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    // SAFETY: target features are guaranteed by `#[target_feature]`, and N
    // has been validated by the `static_assert!` above.
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Rust float-to-int `as` casts saturate and round toward zero, matching
    // the FCVTZS instruction this compiles to (per `assert_instr`).
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Rust float-to-int `as` casts saturate and round toward zero, matching
    // the FCVTZS instruction this compiles to (per `assert_instr`).
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Rust float-to-int `as` casts saturate and round toward zero, matching
    // the FCVTZS instruction this compiles to (per `assert_instr`).
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Rust float-to-int `as` casts saturate and round toward zero, matching
    // the FCVTZU instruction this compiles to (per `assert_instr`).
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Rust float-to-int `as` casts saturate and round toward zero, matching
    // the FCVTZU instruction this compiles to (per `assert_instr`).
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Rust float-to-int `as` casts saturate and round toward zero, matching
    // the FCVTZU instruction this compiles to (per `assert_instr`).
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    // AArch64 NEON intrinsic; lowers to FCVTMS on a 4-lane f16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    // AArch64 NEON intrinsic; lowers to FCVTMS on an 8-lane f16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: `#[target_feature(enable = "neon,fp16")]` guarantees the
    // required CPU features are available at every call site.
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    // AArch64 NEON intrinsic; lowers to FCVTMS on a 2-lane f32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    // AArch64 NEON intrinsic; lowers to FCVTMS on a 4-lane f32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: `#[target_feature(enable = "neon")]` guarantees the required CPU
    // feature is available wherever this function can be called.
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    // Thin binding to the LLVM `fcvtms` intrinsic on a single f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Thin binding to the LLVM `fcvtms` intrinsic on 2 x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Thin binding to the LLVM `fcvtmu` intrinsic on 4 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Thin binding to the LLVM `fcvtmu` intrinsic on 8 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Thin binding to the LLVM `fcvtmu` intrinsic on 2 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Thin binding to the LLVM `fcvtmu` intrinsic on 4 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Thin binding to the LLVM `fcvtmu` intrinsic on a single f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Thin binding to the LLVM `fcvtmu` intrinsic on 2 x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // No dedicated 16-bit intrinsic binding: convert through the 32-bit
    // variant and narrow the result.
    // NOTE(review): `as i16` truncates (wraps) any result outside the i16
    // range rather than saturating — confirm this matches ACLE semantics.
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    // Thin binding to the scalar LLVM `fcvtms` intrinsic (f16 -> i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    // Thin binding to the scalar LLVM `fcvtms` intrinsic (f16 -> i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // No dedicated 16-bit intrinsic binding: convert through the 32-bit
    // variant and narrow the result.
    // NOTE(review): `as u16` truncates (wraps) any result outside the u16
    // range rather than saturating — confirm this matches ACLE semantics.
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    // Thin binding to the scalar LLVM `fcvtmu` intrinsic (f16 -> u32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    // Thin binding to the scalar LLVM `fcvtmu` intrinsic (f16 -> u64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    // Thin binding to the scalar LLVM `fcvtms` intrinsic (f32 -> i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    // Thin binding to the scalar LLVM `fcvtms` intrinsic (f64 -> i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    // Thin binding to the scalar LLVM `fcvtmu` intrinsic (f32 -> u32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    // Thin binding to the scalar LLVM `fcvtmu` intrinsic (f64 -> u64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    // Thin binding to the LLVM `fcvtns` intrinsic on 4 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Thin binding to the LLVM `fcvtns` intrinsic on 8 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    // Thin binding to the LLVM `fcvtns` intrinsic on 2 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Thin binding to the LLVM `fcvtns` intrinsic on 4 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    // Thin binding to the LLVM `fcvtns` intrinsic on a single f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Thin binding to the LLVM `fcvtns` intrinsic on 2 x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Thin binding to the LLVM `fcvtnu` intrinsic on 4 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Thin binding to the LLVM `fcvtnu` intrinsic on 8 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Thin binding to the LLVM `fcvtnu` intrinsic on 2 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Thin binding to the LLVM `fcvtnu` intrinsic on 4 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Thin binding to the LLVM `fcvtnu` intrinsic on a single f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Thin binding to the LLVM `fcvtnu` intrinsic on 2 x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    // No dedicated 16-bit intrinsic binding: convert through the 32-bit
    // variant and narrow the result.
    // NOTE(review): `as i16` truncates (wraps) any result outside the i16
    // range rather than saturating — confirm this matches ACLE semantics.
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    // Thin binding to the scalar LLVM `fcvtns` intrinsic (f16 -> i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    // Thin binding to the scalar LLVM `fcvtns` intrinsic (f16 -> i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    // No dedicated 16-bit intrinsic binding: convert through the 32-bit
    // variant and narrow the result.
    // NOTE(review): `as u16` truncates (wraps) any result outside the u16
    // range rather than saturating — confirm this matches ACLE semantics.
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    // Thin binding to the scalar LLVM `fcvtnu` intrinsic (f16 -> u32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    // Thin binding to the scalar LLVM `fcvtnu` intrinsic (f16 -> u64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    // Thin binding to the scalar LLVM `fcvtns` intrinsic (f32 -> i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    // Thin binding to the scalar LLVM `fcvtns` intrinsic (f64 -> i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    // Thin binding to the scalar LLVM `fcvtnu` intrinsic (f32 -> u32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    // Thin binding to the scalar LLVM `fcvtnu` intrinsic (f64 -> u64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    // Thin binding to the LLVM `fcvtps` intrinsic on 4 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Thin binding to the LLVM `fcvtps` intrinsic on 8 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    // Thin binding to the LLVM `fcvtps` intrinsic on 2 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Thin binding to the LLVM `fcvtps` intrinsic on 4 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    // Thin binding to the LLVM `fcvtps` intrinsic on a single f64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Thin binding to the LLVM `fcvtps` intrinsic on 2 x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value-in/value-out conversion with no memory access; the
    // required target feature is enabled by the attribute on this function.
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure value conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon,fp16` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure value conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon,fp16` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure value conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure value conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    // Converts via the i32 variant and truncates the result.
    // NOTE(review): for f16 inputs whose converted value exceeds i16::MAX
    // (f16 can represent up to 65504), the `as i16` cast wraps rather than
    // saturating to i16::MAX — confirm this matches the intended FCVTPS
    // 16-bit-destination semantics.
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtps` intrinsic; the
    // `neon,fp16` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtps` intrinsic; the
    // `neon,fp16` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    // Converts via the u32 variant and truncates the result.
    // NOTE(review): for f16 inputs whose converted value exceeds u16::MAX
    // (f16 can represent up to 65504), the `as u16` cast wraps rather than
    // saturating — confirm this matches the intended FCVTPU
    // 16-bit-destination semantics.
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon,fp16` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon,fp16` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtps` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtps` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: pure scalar conversion via the LLVM `fcvtpu` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtpd_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    // A plain integer-to-float cast; lowers to UCVTF as asserted above.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    // A plain integer-to-float cast; lowers to UCVTF as asserted above.
    a as f64
}
9278#[doc = "Fixed-point convert to floating-point"]
9279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9280#[inline(always)]
9281#[target_feature(enable = "neon")]
9282#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9283#[rustc_legacy_const_generics(1)]
9284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9285pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9286 static_assert!(N >= 1 && N <= 64);
9287 unsafe extern "unadjusted" {
9288 #[cfg_attr(
9289 any(target_arch = "aarch64", target_arch = "arm64ec"),
9290 link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9291 )]
9292 fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9293 }
9294 unsafe { _vcvts_n_f32_s32(a, N) }
9295}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    // N = number of fractional bits; up to 64 for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    // SAFETY: N is range-checked above; the LLVM intrinsic is a pure
    // conversion whose `neon` requirement is enforced by `#[target_feature]`.
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    // N = number of fractional bits; up to 32 for a 32-bit source.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    // SAFETY: N is range-checked above; the LLVM intrinsic is a pure
    // conversion whose `neon` requirement is enforced by `#[target_feature]`.
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    // N = number of fractional bits; up to 64 for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    // SAFETY: N is range-checked above; the LLVM intrinsic is a pure
    // conversion whose `neon` requirement is enforced by `#[target_feature]`.
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    // N = number of fractional bits; up to 32 for a 32-bit destination.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    // SAFETY: N is range-checked above; the LLVM intrinsic is a pure
    // conversion whose `neon` requirement is enforced by `#[target_feature]`.
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    // N = number of fractional bits; up to 64 for a 64-bit destination.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    // SAFETY: N is range-checked above; the LLVM intrinsic is a pure
    // conversion whose `neon` requirement is enforced by `#[target_feature]`.
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    // N = number of fractional bits; up to 32 for a 32-bit destination.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    // SAFETY: N is range-checked above; the LLVM intrinsic is a pure
    // conversion whose `neon` requirement is enforced by `#[target_feature]`.
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    // N = number of fractional bits; up to 64 for a 64-bit destination.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    // SAFETY: N is range-checked above; the LLVM intrinsic is a pure
    // conversion whose `neon` requirement is enforced by `#[target_feature]`.
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
9422#[doc = "Fixed-point convert to floating-point"]
9423#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
9424#[inline(always)]
9425#[target_feature(enable = "neon")]
9426#[cfg_attr(test, assert_instr(fcvtzs))]
9427#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9428pub fn vcvts_s32_f32(a: f32) -> i32 {
9429 a as i32
9430}
9431#[doc = "Fixed-point convert to floating-point"]
9432#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
9433#[inline(always)]
9434#[target_feature(enable = "neon")]
9435#[cfg_attr(test, assert_instr(fcvtzs))]
9436#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9437pub fn vcvtd_s64_f64(a: f64) -> i64 {
9438 a as i64
9439}
9440#[doc = "Fixed-point convert to floating-point"]
9441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
9442#[inline(always)]
9443#[target_feature(enable = "neon")]
9444#[cfg_attr(test, assert_instr(fcvtzu))]
9445#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9446pub fn vcvts_u32_f32(a: f32) -> u32 {
9447 a as u32
9448}
9449#[doc = "Fixed-point convert to floating-point"]
9450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
9451#[inline(always)]
9452#[target_feature(enable = "neon")]
9453#[cfg_attr(test, assert_instr(fcvtzu))]
9454#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9455pub fn vcvtd_u64_f64(a: f64) -> u64 {
9456 a as u64
9457}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    // SAFETY: pure value conversion via the LLVM `fcvtxn` intrinsic; the
    // `neon` requirement is enforced by `#[target_feature]` above.
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Narrows `b` and places the result in the high half, keeping `a` low.
    // SAFETY: shuffle indices 0..=3 are in bounds for the concatenation of
    // two 2-lane vectors (4 lanes total).
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    // Scalar form: broadcast into a 2-lane vector, narrow, take lane 0.
    // SAFETY: lane index 0 is in bounds for the 2-lane result.
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise division of two equal-length vectors; `neon,fp16`
    // availability is enforced by `#[target_feature]` above.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise division of two equal-length vectors; `neon,fp16`
    // availability is enforced by `#[target_feature]` above.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise division of two equal-length vectors; `neon`
    // availability is enforced by `#[target_feature]` above.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise division of two equal-length vectors; `neon`
    // availability is enforced by `#[target_feature]` above.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise division of two equal-length vectors; `neon`
    // availability is enforced by `#[target_feature]` above.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise division of two equal-length vectors; `neon`
    // availability is enforced by `#[target_feature]` above.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    // Plain scalar f16 division; lowers to FDIV as asserted above.
    a / b
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    // A 1-lane vector only has lane 0, so duplicating it is the identity
    // (hence the `nop` instruction assertion above).
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    // A 1-lane vector only has lane 0, so duplicating it is the identity
    // (hence the `nop` instruction assertion above).
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N < 2 is guaranteed above, so the lane extract is in bounds;
    // the extracted f64 is transmuted into a 1-lane vector of the same size.
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N < 2 is guaranteed above, so the lane extract is in bounds;
    // the extracted u64 is transmuted into a 1-lane vector of the same size.
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N < 8 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N < 8 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N < 8 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N < 8 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N < 8 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N < 8 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    static_assert_uimm_bits!(N, 4);
    // SAFETY: N < 16 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    static_assert_uimm_bits!(N, 4);
    // SAFETY: N < 16 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    static_assert_uimm_bits!(N, 4);
    // SAFETY: N < 16 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    // A 1-lane vector only has lane 0.
    static_assert!(N == 0);
    // SAFETY: N == 0 is guaranteed above, so the lane extract is in bounds.
    unsafe { simd_extract!(a, N as u32) }
}
9712#[doc = "Set all vector lanes to the same value"]
9713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
9714#[inline(always)]
9715#[target_feature(enable = "neon")]
9716#[cfg_attr(test, assert_instr(nop, N = 0))]
9717#[rustc_legacy_const_generics(1)]
9718#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9719pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
9720 static_assert!(N == 0);
9721 unsafe { simd_extract!(a, N as u32) }
9722}
9723#[doc = "Set all vector lanes to the same value"]
9724#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
9725#[inline(always)]
9726#[target_feature(enable = "neon")]
9727#[cfg_attr(test, assert_instr(nop, N = 0))]
9728#[rustc_legacy_const_generics(1)]
9729#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9730pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
9731 static_assert!(N == 0);
9732 unsafe { simd_extract!(a, N as u32) }
9733}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    // 4-lane f16 vector: N must fit in 2 bits (lanes 0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
    // 8-lane ("q") f16 vector: N must fit in 3 bits (lanes 0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    // Source has one lane, so N is pinned to 0; the shuffle broadcasts
    // that lane into both lanes of the 128-bit result (DUP instruction).
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    // Polynomial variant of vdupq_lane_f64: broadcast lane 0 to both lanes.
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    // Two-lane source: N must fit in 1 bit (lane 0 or 1), then broadcast.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    // Polynomial variant: pick lane N (0 or 1) and broadcast to both lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    // NOTE(review): despite the generated "Set all vector lanes" doc text,
    // this (and the sibling scalar-returning vdup intrinsics below) extracts
    // a single lane; the doc strings mirror Arm's per-intrinsic spec files.
    // 2-lane source: N must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    // 2-lane source: N must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    // 2-lane source: N must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    // 2-lane source: N must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    // 2-lane source: N must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // 2-lane source: N must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    // 4-lane source: N must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    // 4-lane source: N must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    // 4-lane source: N must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    // 4-lane source: N must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    // 4-lane source: N must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    // 4-lane source: N must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Binds directly to the LLVM EOR3 intrinsic (SHA3 extension); the
    // signed variants use the "eor3s.*" link names, unsigned use "eor3u.*".
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LLVM EOR3, 8 x i16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LLVM EOR3, 4 x i32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // LLVM EOR3, 2 x i64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Unsigned EOR3, 16 x u8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Unsigned EOR3, 8 x u16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Unsigned EOR3, 4 x u32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Unsigned EOR3, 2 x u64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // N selects where the result window starts in the concatenation a:b.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            // N = 0: result is a unchanged (lanes 0..2 of a:b).
            0 => simd_shuffle!(a, b, [0, 1]),
            // N = 1: result is [a[1], b[0]] (lanes 1..3 of a:b).
            1 => simd_shuffle!(a, b, [1, 2]),
            // static_assert above restricts N to {0, 1}.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Polynomial variant of vextq_f64; identical lane selection.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Fused multiply-add: note the argument order — the product operands
    // (b, c) come first and the accumulator a last.
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE must fit in 2 bits (c has 4 lanes). The selected lane of c is
    // broadcast with vdup_n_f16, then fused-multiply-added into a via b.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // "laneq": c is a 128-bit vector, so LANE spans 3 bits (8 lanes).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // 128-bit accumulator, 64-bit lane source: LANE spans 2 bits (4 lanes).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // All operands 128-bit: LANE spans 3 bits (8 lanes of c).
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // Broadcast lane LANE of c (2 lanes -> 1-bit index), then FMA into a.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // c is 128-bit (4 lanes): LANE spans 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // Lane source c has 2 lanes: 1-bit index; result is 128-bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // All operands 128-bit: LANE spans 2 bits (4 lanes of c).
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // 2 x f64 lanes: 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // Single-lane c: LANE is pinned to 0; reduces to a scalar FMADD.
    static_assert!(LANE == 0);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // c is 128-bit (2 lanes): 1-bit index.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
10274#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
10276#[inline(always)]
10277#[target_feature(enable = "neon,fp16")]
10278#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10279#[cfg(not(target_arch = "arm64ec"))]
10280#[cfg_attr(test, assert_instr(fmla))]
10281pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
10282 vfma_f16(a, b, vdup_n_f16(c))
10283}
10284#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
10285#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
10286#[inline(always)]
10287#[target_feature(enable = "neon,fp16")]
10288#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10289#[cfg(not(target_arch = "arm64ec"))]
10290#[cfg_attr(test, assert_instr(fmla))]
10291pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
10292 vfmaq_f16(a, b, vdupq_n_f16(c))
10293}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar c (single lane), then FMA into accumulator a.
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // Single-lane c: LANE pinned to 0.
    static_assert!(LANE == 0);
    unsafe {
        // Extract the lane, then scalar FMA with product (b, c) and addend a.
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    // Scalar f16 FMA: product operands (b, c) first, accumulator a last.
    fmaf16(b, c, a)
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // v has 4 lanes: LANE spans 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Pull the selected lane out of v, then defer to the scalar FMA.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // "laneq": v has 8 lanes, so LANE spans 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Vector FMA: product operands (b, c) first, accumulator a last.
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // Single-lane c: LANE pinned to 0; broadcast it, then vector FMA.
    static_assert!(LANE == 0);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar c to both lanes, then vector FMA into a.
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // `c` has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the chosen lane, then scalar fused multiply-add: b * c + a.
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // `c` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the chosen lane, then scalar fused multiply-add: b * c + a.
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // `c` has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the chosen lane, then scalar fused multiply-add: b * c + a.
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-add into
    // the f32 accumulator `r` (expected to lower to FMLAL2, per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-add into
    // the f32 accumulator `r` (128-bit form; lowers to FMLAL2 per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-add into
    // the f32 accumulator `r` (expected to lower to FMLAL, per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-add into
    // the f32 accumulator `r` (128-bit form; lowers to FMLAL per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-subtract
    // from the f32 accumulator `r` (lowers to FMLSL2 per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-subtract
    // from the f32 accumulator `r` (128-bit form; FMLSL2 per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the vector form.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and reuse the 128-bit vector form.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-subtract
    // from the f32 accumulator `r` (lowers to FMLSL per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic: widening f16 multiply-subtract
    // from the f32 accumulator `r` (128-bit form; FMLSL per assert_instr).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: gated by the `neon,fp16` (+`fhm`) target features enabled above.
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // a - b * c, expressed as fma(a, -b, c) so the multiply-subtract keeps
    // the single-rounding semantics of the fused operation.
    unsafe {
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and reuse the vector multiply-subtract.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `c` and reuse the vector multiply-subtract.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and reuse the 128-bit multiply-subtract.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` has eight lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `c` and reuse the 128-bit multiply-subtract.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and reuse the vector multiply-subtract.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and reuse the vector multiply-subtract.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and reuse the 128-bit multiply-subtract.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` has four lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and reuse the 128-bit multiply-subtract.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // `c` has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and reuse the 128-bit multiply-subtract.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` has exactly one lane, so 0 is the only valid index.
    static_assert!(LANE == 0);
    // Broadcast the selected lane of `c` and reuse the vector multiply-subtract.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // `c` has two lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and reuse the vector multiply-subtract.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Splat the scalar `c` across all lanes and defer to the vector
    // multiply-subtract.
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Splat the scalar `c` across all lanes and defer to the 128-bit vector
    // multiply-subtract.
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Splat the scalar `c` and defer to the vector multiply-subtract.
    vfms_f64(a, b, vdup_n_f64(c))
}
11058#[doc = "Floating-point fused multiply-subtract from accumulator"]
11059#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
11060#[inline(always)]
11061#[cfg_attr(test, assert_instr(fmsub))]
11062#[target_feature(enable = "neon,fp16")]
11063#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11064#[cfg(not(target_arch = "arm64ec"))]
11065pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    // Negate `b` (sign-flip is exact, no rounding) and defer to the scalar
    // fused multiply-add counterpart.
11066    vfmah_f16(a, -b, c)
11067}
11068#[doc = "Floating-point fused multiply-subtract from accumulator"]
11069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
11070#[inline(always)]
11071#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11072#[rustc_legacy_const_generics(3)]
11073#[target_feature(enable = "neon,fp16")]
11074#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11075#[cfg(not(target_arch = "arm64ec"))]
11076pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // `v` has 4 lanes, so LANE must fit in 2 unsigned bits (0..=3).
11077    static_assert_uimm_bits!(LANE, 2);
11078    unsafe {
        // Pull the selected lane out of `v`, then use the scalar fms.
11079        let c: f16 = simd_extract!(v, LANE as u32);
11080        vfmsh_f16(a, b, c)
11081    }
11082}
11083#[doc = "Floating-point fused multiply-subtract from accumulator"]
11084#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
11085#[inline(always)]
11086#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11087#[rustc_legacy_const_generics(3)]
11088#[target_feature(enable = "neon,fp16")]
11089#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11090#[cfg(not(target_arch = "arm64ec"))]
11091pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // `v` has 8 lanes, so LANE must fit in 3 unsigned bits (0..=7).
11092    static_assert_uimm_bits!(LANE, 3);
11093    unsafe {
        // Pull the selected lane out of `v`, then use the scalar fms.
11094        let c: f16 = simd_extract!(v, LANE as u32);
11095        vfmsh_f16(a, b, c)
11096    }
11097}
11098#[doc = "Floating-point fused multiply-subtract from accumulator"]
11099#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
11100#[inline(always)]
11101#[target_feature(enable = "neon")]
11102#[cfg_attr(test, assert_instr(fmls))]
11103#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11104pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
11105    unsafe {
        // Negate `b` lane-wise (exact, no rounding) and defer to the fused
        // multiply-add; this is what lowers to a single FMLS.
11106        let b: float64x2_t = simd_neg(b);
11107        vfmaq_f64(a, b, c)
11108    }
11109}
11110#[doc = "Floating-point fused multiply-subtract to accumulator"]
11111#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
11112#[inline(always)]
11113#[target_feature(enable = "neon")]
11114#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
11115#[rustc_legacy_const_generics(3)]
11116#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11117pub fn vfmsq_lane_f64<const LANE: i32>(
11118    a: float64x2_t,
11119    b: float64x2_t,
11120    c: float64x1_t,
11121) -> float64x2_t {
    // `c` is a single-lane vector, so the only valid lane index is 0.
11122    static_assert!(LANE == 0);
    // Extract that lane, splat it across both lanes, and defer to the
    // full-vector fused multiply-subtract.
11123    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
11124}
11125#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
11126#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
11127#[inline(always)]
11128#[target_feature(enable = "neon")]
11129#[cfg_attr(test, assert_instr(fmls))]
11130#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11131pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Splat the scalar `c` across both lanes and reuse the vector form.
11132    vfmsq_f64(a, b, vdupq_n_f64(c))
11133}
11134#[doc = "Floating-point fused multiply-subtract to accumulator"]
11135#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
11136#[inline(always)]
11137#[target_feature(enable = "neon")]
11138#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11139#[rustc_legacy_const_generics(3)]
11140#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11141pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // Negate `b` (exact) and defer to the by-lane fused multiply-add; the
    // LANE bounds check happens inside vfmas_lane_f32.
11142    vfmas_lane_f32::<LANE>(a, -b, c)
11143}
11144#[doc = "Floating-point fused multiply-subtract to accumulator"]
11145#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
11146#[inline(always)]
11147#[target_feature(enable = "neon")]
11148#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11149#[rustc_legacy_const_generics(3)]
11150#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11151pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // Same scheme as vfmss_lane_f32, but selecting from a 4-lane vector.
11152    vfmas_laneq_f32::<LANE>(a, -b, c)
11153}
11154#[doc = "Floating-point fused multiply-subtract to accumulator"]
11155#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
11156#[inline(always)]
11157#[target_feature(enable = "neon")]
11158#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11159#[rustc_legacy_const_generics(3)]
11160#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11161pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // Negate `b` (exact) and defer to the by-lane fused multiply-add; the
    // LANE bounds check happens inside vfmad_lane_f64.
11162    vfmad_lane_f64::<LANE>(a, -b, c)
11163}
11164#[doc = "Floating-point fused multiply-subtract to accumulator"]
11165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
11166#[inline(always)]
11167#[target_feature(enable = "neon")]
11168#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11169#[rustc_legacy_const_generics(3)]
11170#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11171pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // Same scheme as vfmsd_lane_f64, but selecting from a 2-lane vector.
11172    vfmad_laneq_f64::<LANE>(a, -b, c)
11173}
// The following `vld1*` float intrinsics are all implemented as a single
// unaligned read of the whole vector: `ptr::read_unaligned` imposes no
// alignment requirement on `ptr`, but the pointer must still be valid for
// a read of the full vector width (see the # Safety doc on each item).
11174#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11175#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
11176#[doc = "## Safety"]
11177#[doc = " * Neon intrinsic unsafe"]
11178#[inline(always)]
11179#[target_feature(enable = "neon,fp16")]
11180#[cfg_attr(test, assert_instr(ldr))]
11181#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11182#[cfg(not(target_arch = "arm64ec"))]
11183pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
11184    crate::ptr::read_unaligned(ptr.cast())
11185}
11186#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11187#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
11188#[doc = "## Safety"]
11189#[doc = " * Neon intrinsic unsafe"]
11190#[inline(always)]
11191#[target_feature(enable = "neon,fp16")]
11192#[cfg_attr(test, assert_instr(ldr))]
11193#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11194#[cfg(not(target_arch = "arm64ec"))]
11195pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
11196    crate::ptr::read_unaligned(ptr.cast())
11197}
11198#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11199#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
11200#[doc = "## Safety"]
11201#[doc = " * Neon intrinsic unsafe"]
11202#[inline(always)]
11203#[target_feature(enable = "neon")]
11204#[cfg_attr(test, assert_instr(ldr))]
11205#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11206pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
11207    crate::ptr::read_unaligned(ptr.cast())
11208}
11209#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
11211#[doc = "## Safety"]
11212#[doc = " * Neon intrinsic unsafe"]
11213#[inline(always)]
11214#[target_feature(enable = "neon")]
11215#[cfg_attr(test, assert_instr(ldr))]
11216#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11217pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
11218    crate::ptr::read_unaligned(ptr.cast())
11219}
11220#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11221#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
11222#[doc = "## Safety"]
11223#[doc = " * Neon intrinsic unsafe"]
11224#[inline(always)]
11225#[target_feature(enable = "neon")]
11226#[cfg_attr(test, assert_instr(ldr))]
11227#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11228pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
11229    crate::ptr::read_unaligned(ptr.cast())
11230}
11231#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11232#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
11233#[doc = "## Safety"]
11234#[doc = " * Neon intrinsic unsafe"]
11235#[inline(always)]
11236#[target_feature(enable = "neon")]
11237#[cfg_attr(test, assert_instr(ldr))]
11238#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11239pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
11240    crate::ptr::read_unaligned(ptr.cast())
11241}
// The following `vld1*` signed-integer intrinsics are all a single unaligned
// read of the whole vector: `ptr::read_unaligned` imposes no alignment
// requirement on `ptr`, but the pointer must be valid for a read of the
// full vector width (see the # Safety doc on each item).
11242#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
11244#[doc = "## Safety"]
11245#[doc = " * Neon intrinsic unsafe"]
11246#[inline(always)]
11247#[target_feature(enable = "neon")]
11248#[cfg_attr(test, assert_instr(ldr))]
11249#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11250pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
11251    crate::ptr::read_unaligned(ptr.cast())
11252}
11253#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11254#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
11255#[doc = "## Safety"]
11256#[doc = " * Neon intrinsic unsafe"]
11257#[inline(always)]
11258#[target_feature(enable = "neon")]
11259#[cfg_attr(test, assert_instr(ldr))]
11260#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11261pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
11262    crate::ptr::read_unaligned(ptr.cast())
11263}
11264#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11265#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
11266#[doc = "## Safety"]
11267#[doc = " * Neon intrinsic unsafe"]
11268#[inline(always)]
11269#[target_feature(enable = "neon")]
11270#[cfg_attr(test, assert_instr(ldr))]
11271#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11272pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
11273    crate::ptr::read_unaligned(ptr.cast())
11274}
11275#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11276#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
11277#[doc = "## Safety"]
11278#[doc = " * Neon intrinsic unsafe"]
11279#[inline(always)]
11280#[target_feature(enable = "neon")]
11281#[cfg_attr(test, assert_instr(ldr))]
11282#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11283pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
11284    crate::ptr::read_unaligned(ptr.cast())
11285}
11286#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11287#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
11288#[doc = "## Safety"]
11289#[doc = " * Neon intrinsic unsafe"]
11290#[inline(always)]
11291#[target_feature(enable = "neon")]
11292#[cfg_attr(test, assert_instr(ldr))]
11293#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11294pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
11295    crate::ptr::read_unaligned(ptr.cast())
11296}
11297#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
11299#[doc = "## Safety"]
11300#[doc = " * Neon intrinsic unsafe"]
11301#[inline(always)]
11302#[target_feature(enable = "neon")]
11303#[cfg_attr(test, assert_instr(ldr))]
11304#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11305pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
11306    crate::ptr::read_unaligned(ptr.cast())
11307}
11308#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
11310#[doc = "## Safety"]
11311#[doc = " * Neon intrinsic unsafe"]
11312#[inline(always)]
11313#[target_feature(enable = "neon")]
11314#[cfg_attr(test, assert_instr(ldr))]
11315#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11316pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
11317    crate::ptr::read_unaligned(ptr.cast())
11318}
11319#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11320#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
11321#[doc = "## Safety"]
11322#[doc = " * Neon intrinsic unsafe"]
11323#[inline(always)]
11324#[target_feature(enable = "neon")]
11325#[cfg_attr(test, assert_instr(ldr))]
11326#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11327pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
11328    crate::ptr::read_unaligned(ptr.cast())
11329}
// The following `vld1*` unsigned-integer intrinsics are all a single
// unaligned read of the whole vector: `ptr::read_unaligned` imposes no
// alignment requirement on `ptr`, but the pointer must be valid for a read
// of the full vector width (see the # Safety doc on each item).
11330#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11331#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
11332#[doc = "## Safety"]
11333#[doc = " * Neon intrinsic unsafe"]
11334#[inline(always)]
11335#[target_feature(enable = "neon")]
11336#[cfg_attr(test, assert_instr(ldr))]
11337#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11338pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
11339    crate::ptr::read_unaligned(ptr.cast())
11340}
11341#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11342#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
11343#[doc = "## Safety"]
11344#[doc = " * Neon intrinsic unsafe"]
11345#[inline(always)]
11346#[target_feature(enable = "neon")]
11347#[cfg_attr(test, assert_instr(ldr))]
11348#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11349pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
11350    crate::ptr::read_unaligned(ptr.cast())
11351}
11352#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11353#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
11354#[doc = "## Safety"]
11355#[doc = " * Neon intrinsic unsafe"]
11356#[inline(always)]
11357#[target_feature(enable = "neon")]
11358#[cfg_attr(test, assert_instr(ldr))]
11359#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11360pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
11361    crate::ptr::read_unaligned(ptr.cast())
11362}
11363#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11364#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
11365#[doc = "## Safety"]
11366#[doc = " * Neon intrinsic unsafe"]
11367#[inline(always)]
11368#[target_feature(enable = "neon")]
11369#[cfg_attr(test, assert_instr(ldr))]
11370#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11371pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
11372    crate::ptr::read_unaligned(ptr.cast())
11373}
11374#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11375#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
11376#[doc = "## Safety"]
11377#[doc = " * Neon intrinsic unsafe"]
11378#[inline(always)]
11379#[target_feature(enable = "neon")]
11380#[cfg_attr(test, assert_instr(ldr))]
11381#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11382pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
11383    crate::ptr::read_unaligned(ptr.cast())
11384}
11385#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11386#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
11387#[doc = "## Safety"]
11388#[doc = " * Neon intrinsic unsafe"]
11389#[inline(always)]
11390#[target_feature(enable = "neon")]
11391#[cfg_attr(test, assert_instr(ldr))]
11392#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11393pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
11394    crate::ptr::read_unaligned(ptr.cast())
11395}
11396#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11397#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
11398#[doc = "## Safety"]
11399#[doc = " * Neon intrinsic unsafe"]
11400#[inline(always)]
11401#[target_feature(enable = "neon")]
11402#[cfg_attr(test, assert_instr(ldr))]
11403#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11404pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
11405    crate::ptr::read_unaligned(ptr.cast())
11406}
11407#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11408#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
11409#[doc = "## Safety"]
11410#[doc = " * Neon intrinsic unsafe"]
11411#[inline(always)]
11412#[target_feature(enable = "neon")]
11413#[cfg_attr(test, assert_instr(ldr))]
11414#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11415pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
11416    crate::ptr::read_unaligned(ptr.cast())
11417}
// The following `vld1*` polynomial intrinsics are all a single unaligned
// read of the whole vector. Note the p64 variants additionally require the
// `aes` target feature (poly64 is gated on the crypto extension).
11418#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
11420#[doc = "## Safety"]
11421#[doc = " * Neon intrinsic unsafe"]
11422#[inline(always)]
11423#[target_feature(enable = "neon")]
11424#[cfg_attr(test, assert_instr(ldr))]
11425#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11426pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
11427    crate::ptr::read_unaligned(ptr.cast())
11428}
11429#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11430#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
11431#[doc = "## Safety"]
11432#[doc = " * Neon intrinsic unsafe"]
11433#[inline(always)]
11434#[target_feature(enable = "neon")]
11435#[cfg_attr(test, assert_instr(ldr))]
11436#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11437pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
11438    crate::ptr::read_unaligned(ptr.cast())
11439}
11440#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
11442#[doc = "## Safety"]
11443#[doc = " * Neon intrinsic unsafe"]
11444#[inline(always)]
11445#[target_feature(enable = "neon")]
11446#[cfg_attr(test, assert_instr(ldr))]
11447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11448pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
11449    crate::ptr::read_unaligned(ptr.cast())
11450}
11451#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
11453#[doc = "## Safety"]
11454#[doc = " * Neon intrinsic unsafe"]
11455#[inline(always)]
11456#[target_feature(enable = "neon")]
11457#[cfg_attr(test, assert_instr(ldr))]
11458#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11459pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
11460    crate::ptr::read_unaligned(ptr.cast())
11461}
11462#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
11464#[doc = "## Safety"]
11465#[doc = " * Neon intrinsic unsafe"]
11466#[inline(always)]
11467#[target_feature(enable = "neon,aes")]
11468#[cfg_attr(test, assert_instr(ldr))]
11469#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11470pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
11471    crate::ptr::read_unaligned(ptr.cast())
11472}
11473#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11474#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
11475#[doc = "## Safety"]
11476#[doc = " * Neon intrinsic unsafe"]
11477#[inline(always)]
11478#[target_feature(enable = "neon,aes")]
11479#[cfg_attr(test, assert_instr(ldr))]
11480#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11481pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
11482    crate::ptr::read_unaligned(ptr.cast())
11483}
// The `vld1*_f64_xN` intrinsics load N consecutive f64 vectors from `a` by
// calling straight into the corresponding `llvm.aarch64.neon.ld1xN.*`
// compiler intrinsic via an `extern "unadjusted"` FFI declaration (the
// "unadjusted" ABI passes SIMD aggregates exactly as LLVM expects).
11484#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11485#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
11486#[doc = "## Safety"]
11487#[doc = " * Neon intrinsic unsafe"]
11488#[inline(always)]
11489#[target_feature(enable = "neon")]
11490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11491#[cfg_attr(test, assert_instr(ld1))]
11492pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
11493    unsafe extern "unadjusted" {
11494        #[cfg_attr(
11495            any(target_arch = "aarch64", target_arch = "arm64ec"),
11496            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0"
11497        )]
11498        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
11499    }
11500    _vld1_f64_x2(a)
11501}
11502#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11503#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
11504#[doc = "## Safety"]
11505#[doc = " * Neon intrinsic unsafe"]
11506#[inline(always)]
11507#[target_feature(enable = "neon")]
11508#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11509#[cfg_attr(test, assert_instr(ld1))]
11510pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
11511    unsafe extern "unadjusted" {
11512        #[cfg_attr(
11513            any(target_arch = "aarch64", target_arch = "arm64ec"),
11514            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0"
11515        )]
11516        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
11517    }
11518    _vld1_f64_x3(a)
11519}
11520#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
11522#[doc = "## Safety"]
11523#[doc = " * Neon intrinsic unsafe"]
11524#[inline(always)]
11525#[target_feature(enable = "neon")]
11526#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11527#[cfg_attr(test, assert_instr(ld1))]
11528pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
11529    unsafe extern "unadjusted" {
11530        #[cfg_attr(
11531            any(target_arch = "aarch64", target_arch = "arm64ec"),
11532            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0"
11533        )]
11534        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
11535    }
11536    _vld1_f64_x4(a)
11537}
11538#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
11540#[doc = "## Safety"]
11541#[doc = " * Neon intrinsic unsafe"]
11542#[inline(always)]
11543#[target_feature(enable = "neon")]
11544#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11545#[cfg_attr(test, assert_instr(ld1))]
11546pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
11547    unsafe extern "unadjusted" {
11548        #[cfg_attr(
11549            any(target_arch = "aarch64", target_arch = "arm64ec"),
11550            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0"
11551        )]
11552        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
11553    }
11554    _vld1q_f64_x2(a)
11555}
11556#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
11558#[doc = "## Safety"]
11559#[doc = " * Neon intrinsic unsafe"]
11560#[inline(always)]
11561#[target_feature(enable = "neon")]
11562#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11563#[cfg_attr(test, assert_instr(ld1))]
11564pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
11565    unsafe extern "unadjusted" {
11566        #[cfg_attr(
11567            any(target_arch = "aarch64", target_arch = "arm64ec"),
11568            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0"
11569        )]
11570        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
11571    }
11572    _vld1q_f64_x3(a)
11573}
11574#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
11576#[doc = "## Safety"]
11577#[doc = " * Neon intrinsic unsafe"]
11578#[inline(always)]
11579#[target_feature(enable = "neon")]
11580#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11581#[cfg_attr(test, assert_instr(ld1))]
11582pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
11583    unsafe extern "unadjusted" {
11584        #[cfg_attr(
11585            any(target_arch = "aarch64", target_arch = "arm64ec"),
11586            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0"
11587        )]
11588        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
11589    }
11590    _vld1q_f64_x4(a)
11591}
// `vld2*_dup_*` load one interleaved 2-element structure and replicate it
// across every lane of both result vectors, via the `llvm.aarch64.neon.ld2r`
// compiler intrinsic (LD2R instruction).
11592#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11593#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
11594#[doc = "## Safety"]
11595#[doc = " * Neon intrinsic unsafe"]
11596#[inline(always)]
11597#[target_feature(enable = "neon")]
11598#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11599#[cfg_attr(test, assert_instr(ld2r))]
11600pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
11601    unsafe extern "unadjusted" {
11602        #[cfg_attr(
11603            any(target_arch = "aarch64", target_arch = "arm64ec"),
11604            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
11605        )]
11606        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
11607    }
11608    _vld2_dup_f64(a as _)
11609}
11610#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11611#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
11612#[doc = "## Safety"]
11613#[doc = " * Neon intrinsic unsafe"]
11614#[inline(always)]
11615#[target_feature(enable = "neon")]
11616#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11617#[cfg_attr(test, assert_instr(ld2r))]
11618pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
11619    unsafe extern "unadjusted" {
11620        #[cfg_attr(
11621            any(target_arch = "aarch64", target_arch = "arm64ec"),
11622            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
11623        )]
11624        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
11625    }
11626    _vld2q_dup_f64(a as _)
11627}
11628#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11629#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
11630#[doc = "## Safety"]
11631#[doc = " * Neon intrinsic unsafe"]
11632#[inline(always)]
11633#[target_feature(enable = "neon")]
11634#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11635#[cfg_attr(test, assert_instr(ld2r))]
11636pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
11637    unsafe extern "unadjusted" {
11638        #[cfg_attr(
11639            any(target_arch = "aarch64", target_arch = "arm64ec"),
11640            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
11641        )]
11642        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
11643    }
11644    _vld2q_dup_s64(a as _)
11645}
11646#[doc = "Load multiple 2-element structures to two registers"]
11647#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
11648#[doc = "## Safety"]
11649#[doc = " * Neon intrinsic unsafe"]
11650#[inline(always)]
11651#[target_feature(enable = "neon")]
11652#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// NOTE(review): asserts `nop` rather than `ld2` — presumably because the
// 1-lane (v1f64) case needs no interleaving instruction; confirm against
// the generated codegen test.
11653#[cfg_attr(test, assert_instr(nop))]
11654pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
11655    unsafe extern "unadjusted" {
11656        #[cfg_attr(
11657            any(target_arch = "aarch64", target_arch = "arm64ec"),
11658            link_name = "llvm.aarch64.neon.ld2.v1f64.p0"
11659        )]
11660        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
11661    }
    // The FFI declaration takes a vector pointer; `as _` reinterprets the
    // element pointer to match.
11662    _vld2_f64(a as _)
11663}
// `vld2_lane_*` load one interleaved 2-element structure into lane LANE of
// the two provided vectors, leaving the other lanes of `b` untouched. The
// LLVM ld2lane intrinsic takes the existing vectors, the lane index, and
// the source pointer (as *const i8).
11664#[doc = "Load multiple 2-element structures to two registers"]
11665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
11666#[doc = "## Safety"]
11667#[doc = " * Neon intrinsic unsafe"]
11668#[inline(always)]
11669#[target_feature(enable = "neon")]
11670#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11671#[rustc_legacy_const_generics(2)]
11672#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11673pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // Single-lane vectors: the only valid lane index is 0.
11674    static_assert!(LANE == 0);
11675    unsafe extern "unadjusted" {
11676        #[cfg_attr(
11677            any(target_arch = "aarch64", target_arch = "arm64ec"),
11678            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
11679        )]
11680        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
11681    }
11682    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
11683}
11684#[doc = "Load multiple 2-element structures to two registers"]
11685#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
11686#[doc = "## Safety"]
11687#[doc = " * Neon intrinsic unsafe"]
11688#[inline(always)]
11689#[target_feature(enable = "neon")]
11690#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11691#[rustc_legacy_const_generics(2)]
11692#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11693pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // Single-lane vectors: the only valid lane index is 0.
11694    static_assert!(LANE == 0);
11695    unsafe extern "unadjusted" {
11696        #[cfg_attr(
11697            any(target_arch = "aarch64", target_arch = "arm64ec"),
11698            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
11699        )]
11700        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
11701    }
11702    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
11703}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    // Single-lane vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // poly64 vectors are bit-identical to i64 vectors, so delegate to the
    // signed implementation and reinterpret the result.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    // Single-lane vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // u64 vectors are bit-identical to i64 vectors, so delegate to the
    // signed implementation and reinterpret the result.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // poly64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Reinterpreting vector types is endian-sensitive: on big-endian targets
    // the two 64-bit lanes of each register must be swapped back into the
    // order callers expect after the transmute.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // u64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Big-endian: swap the two 64-bit lanes of each register after the
    // transmute to restore the lane order callers expect.
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // Direct binding to LLVM's aarch64 ld2 intrinsic; the pointer is passed
    // untyped (`a as _`) because the intrinsic takes an opaque pointer.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // Direct binding to LLVM's aarch64 ld2 intrinsic; also reused (via
    // transmute) by the u64/p64 wrappers below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    // ld2lane takes the current register contents, the lane index, and an
    // untyped pointer to the two elements to load into that lane.
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // int8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    // Also reused (via transmute) by the u8/p8 wrappers below.
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    // Also reused (via transmute) by the u64/p64 wrappers below.
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 vectors share the i64 bit layout; reuse the signed version.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 vectors share the i8 bit layout; reuse the signed version.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 vectors share the i64 bit layout; reuse the signed version.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // poly8 vectors share the i8 bit layout; reuse the signed version.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // poly64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Big-endian: swap the two 64-bit lanes of each register after the
    // transmute to restore the lane order callers expect.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // u64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // Big-endian: swap the two 64-bit lanes of each register after the
    // transmute to restore the lane order callers expect.
    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    // Direct binding to LLVM's aarch64 ld3r (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    // Direct binding to LLVM's aarch64 ld3r (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    // Direct binding to LLVM's aarch64 ld3r intrinsic; also reused (via
    // transmute) by the u64/p64 dup wrappers below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // Direct binding to LLVM's aarch64 ld3 intrinsic. With one lane per
    // register this lowers to plain loads, hence assert_instr(nop).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v1f64.p0"
        )]
        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    _vld3_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so 0 is the only valid lane index.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    // ld3lane takes the current register contents, the lane index, and an
    // untyped pointer to the three elements to load into that lane.
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    // Single-lane vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // poly64 vectors share the i64 bit layout; reuse the signed version.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so 0 is the only valid lane index.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    // ld3lane takes the current register contents, the lane index, and an
    // untyped pointer to the three elements to load into that lane.
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    // Single-lane vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // u64 vectors share the i64 bit layout; reuse the signed version.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // poly64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian: swap the two 64-bit lanes of each of the three registers
    // after the transmute to restore the lane order callers expect.
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // u64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Big-endian: swap the two 64-bit lanes of each of the three registers
    // after the transmute to restore the lane order callers expect.
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    // Direct binding to LLVM's aarch64 ld3 intrinsic; the pointer is passed
    // untyped (`a as _`) because the intrinsic takes an opaque pointer.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    // Direct binding to LLVM's aarch64 ld3 intrinsic; also reused (via
    // transmute) by the u64/p64 wrappers below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    // ld3lane takes the current register contents, the lane index, and an
    // untyped pointer to the three elements to load into that lane.
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 vectors share the i64 bit layout; reuse the signed version.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
12268#[doc = "Load multiple 3-element structures to two registers"]
12269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12270#[doc = "## Safety"]
12271#[doc = " * Neon intrinsic unsafe"]
12272#[inline(always)]
12273#[target_feature(enable = "neon")]
12274#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12275#[rustc_legacy_const_generics(2)]
12276#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12277pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12278 static_assert_uimm_bits!(LANE, 3);
12279 unsafe extern "unadjusted" {
12280 #[cfg_attr(
12281 any(target_arch = "aarch64", target_arch = "arm64ec"),
12282 link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12283 )]
12284 fn _vld3q_lane_s8(
12285 a: int8x16_t,
12286 b: int8x16_t,
12287 c: int8x16_t,
12288 n: i64,
12289 ptr: *const i8,
12290 ) -> int8x16x3_t;
12291 }
12292 _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12293}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    // Also reused (via transmute) by the u64/p64 wrappers.
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 vectors share the i8 bit layout; reuse the signed version.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 vectors share the i64 bit layout; reuse the signed version.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // poly8 vectors share the i8 bit layout; reuse the signed version.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // poly64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian: swap the two 64-bit lanes of each of the three registers
    // after the transmute to restore the lane order callers expect.
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // u64 is bit-identical to i64; on little-endian the reinterpretation
    // needs no lane fix-up (see the big-endian variant of this function).
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // Big-endian: delegate to the signed variant, then lane-reverse each of
    // the three 2-lane registers ([1, 0]) to restore the expected element
    // order after the transmute.
    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    // Bind and call the LLVM LD4R intrinsic directly; the pointer is passed
    // through unchanged (`a as _` only adjusts the pointer type).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    // Bind and call the LLVM LD4R intrinsic directly (v2f64 = 2 x f64 lanes
    // per register); the pointer is passed through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    // Bind and call the LLVM LD4R intrinsic directly (v2i64 = 2 x i64 lanes
    // per register); also the backing implementation for the u64/p64 dup
    // wrappers elsewhere in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // Single-lane (v1f64) form: `assert_instr(nop)` because no dedicated
    // structure-load instruction is required to be emitted for this shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v1f64.p0"
        )]
        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    _vld4_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // float64x1_t has a single lane, so LANE can only be 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    // The LLVM lane-load takes the four existing registers, the lane index
    // as i64, and the source pointer last.
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so LANE can only be 0. This is also the
    // backing implementation for the u64/p64 lane wrappers below.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // Single-lane vector: LANE must be 0. Delegates to the signed variant;
    // p64 and i64 share the same 64-bit lane layout, so the transmutes are
    // no-op bitcasts.
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // Single-lane vector: LANE must be 0. Delegates to the signed variant;
    // u64 and i64 share the same lane layout, so the transmutes are no-ops.
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian: delegate to the signed variant; p64 and i64 share the
    // same 64-bit lane layout, so the transmutes are no-op bitcasts.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian: delegate to the signed variant, then lane-reverse each of
    // the four 2-lane registers ([1, 0]) to restore the expected element
    // order after the transmute.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian: delegate to the signed variant; u64 and i64 share the
    // same lane layout, so the transmutes are no-op bitcasts.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian: delegate to the signed variant, then lane-reverse each of
    // the four 2-lane registers ([1, 0]) to restore the expected element
    // order after the transmute.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Bind and call the LLVM LD4 intrinsic directly (v2f64 = 2 x f64 lanes
    // per register); the pointer is passed through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Bind and call the LLVM LD4 intrinsic directly; also the backing
    // implementation for the vld4q_u64/vld4q_p64 wrappers below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    // Four existing registers, lane index as i64, source pointer last.
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12688#[doc = "Load multiple 4-element structures to four registers"]
12689#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12690#[doc = "## Safety"]
12691#[doc = " * Neon intrinsic unsafe"]
12692#[inline(always)]
12693#[target_feature(enable = "neon")]
12694#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12695#[rustc_legacy_const_generics(2)]
12696#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12697pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12698 static_assert_uimm_bits!(LANE, 3);
12699 unsafe extern "unadjusted" {
12700 #[cfg_attr(
12701 any(target_arch = "aarch64", target_arch = "arm64ec"),
12702 link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12703 )]
12704 fn _vld4q_lane_s8(
12705 a: int8x16_t,
12706 b: int8x16_t,
12707 c: int8x16_t,
12708 d: int8x16_t,
12709 n: i64,
12710 ptr: *const i8,
12711 ) -> int8x16x4_t;
12712 }
12713 _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12714}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1). This is also
    // the backing implementation for the u64/p64 lane wrappers below.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // 2-lane vector: LANE must fit in 1 bit. Delegates to the signed
    // variant; p64 and i64 share the same lane layout, so the transmutes
    // are no-op bitcasts.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // 16-lane vector: LANE must fit in 4 bits (0..=15). Delegates to the
    // signed variant; u8 and i8 share the same lane layout.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // 2-lane vector: LANE must fit in 1 bit. Delegates to the signed
    // variant; u64 and i64 share the same lane layout.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // 16-lane vector: LANE must fit in 4 bits (0..=15). Delegates to the
    // signed variant; p8 and i8 share the same lane layout.
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian: delegate to the signed variant; p64 and i64 share the
    // same 64-bit lane layout, so the transmutes are no-op bitcasts.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian: delegate to the signed variant, then lane-reverse each of
    // the four 2-lane registers ([1, 0]) to restore the expected element
    // order after the transmute.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian: delegate to the signed variant; u64 and i64 share the
    // same lane layout, so the transmutes are no-op bitcasts.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian: delegate to the signed variant, then lane-reverse each of
    // the four 2-lane registers ([1, 0]) to restore the expected element
    // order after the transmute.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
pub unsafe fn vldap1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
    // Single-lane vector: LANE must be 0.
    static_assert!(LANE == 0);
    // Implemented as an atomic acquire load of the i64, inserted into the
    // given lane of `src`; this is also the backing implementation for the
    // u64/p64 wrappers below.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
pub unsafe fn vldap1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
    // 2-lane vector: LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Atomic acquire load of the i64, inserted into the selected lane of
    // `src`; backing implementation for the u64/p64/f64 q-wrappers below.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
pub unsafe fn vldap1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
    // 2-lane vector: LANE must fit in 1 bit. Delegates to the s64 variant
    // and bitcasts: f64 and i64 lanes have the same size, so the acquire
    // load of the raw 64-bit value is reinterpreted as f64.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
pub unsafe fn vldap1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
    // Single-lane vector: LANE must be 0. Delegates to the s64 variant;
    // u64 and i64 share the same lane layout, so the transmutes are no-ops.
    static_assert!(LANE == 0);
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
pub unsafe fn vldap1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
    // 2-lane vector: LANE must fit in 1 bit. Delegates to the s64 variant;
    // u64 and i64 share the same lane layout, so the transmutes are no-ops.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
pub unsafe fn vldap1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
    // Single-lane vector: LANE must be 0. Delegates to the s64 variant;
    // p64 and i64 share the same lane layout, so the transmutes are no-ops.
    static_assert!(LANE == 0);
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
pub unsafe fn vldap1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
    // 2-lane vector: LANE must fit in 1 bit. Delegates to the s64 variant;
    // p64 and i64 share the same lane layout, so the transmutes are no-ops.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_f16<const INDEX: i32>(a: float16x4_t, b: uint8x8_t) -> float16x8_t {
    // INDEX selects among segments of the index vector; valid range for the
    // 16-bit element / 64-bit index form is 0..=3. Delegates to the s16
    // variant, bitcasting the f16 table and result (same lane width).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_f16<const INDEX: i32>(a: float16x8_t, b: uint8x8_t) -> float16x8_t {
    // INDEX range for the 16-bit element form is 0..=3. Delegates to the
    // s16 variant, bitcasting the f16 table and result (same lane width).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // INDEX range for the 8-bit element / 64-bit index form is 0..=1.
    // Delegates to the s8 variant; u8 and i8 share the same lane layout.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // INDEX range for the 8-bit element form is 0..=1. Delegates to the s8
    // variant; u8 and i8 share the same lane layout.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // INDEX range for the 16-bit element form is 0..=3. Delegates to the
    // s16 variant; u16 and i16 share the same lane layout.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // INDEX range for the 16-bit element form is 0..=3. Delegates to the
    // s16 variant; u16 and i16 share the same lane layout.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // INDEX range for the 8-bit element form is 0..=1. Delegates to the s8
    // variant; p8 and i8 share the same lane layout.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // INDEX range for the 8-bit element form is 0..=1. Delegates to the s8
    // variant; p8 and i8 share the same lane layout.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // INDEX range for the 16-bit element form is 0..=3. Delegates to the
    // s16 variant; p16 and i16 share the same lane layout.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // 16-bit-element variant accepts indices 0..=3; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // Compile-time range check; LANE is then forwarded as a runtime i32.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC. The suffix encodes result/table vector types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Compile-time range check; LANE is then forwarded as a runtime i32.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    // 16-bit-element variant accepts LANE 0..=3; checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    // 16-bit-element variant accepts LANE 0..=3; checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_f16<const INDEX: i32>(a: float16x4_t, b: uint8x16_t) -> float16x8_t {
    // `laneq` takes a 128-bit index vector, so INDEX ranges over 0..=7.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_f16<const INDEX: i32>(a: float16x8_t, b: uint8x16_t) -> float16x8_t {
    // `laneq` takes a 128-bit index vector, so INDEX ranges over 0..=7.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x16_t {
    // 8-bit-element laneq variant accepts INDEX 0..=3; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Unsigned and signed vectors are bit-identical in layout: reinterpret,
    // delegate to the signed form, reinterpret back.
    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // 8-bit-element laneq variant accepts INDEX 0..=3; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x16_t) -> uint16x8_t {
    // 16-bit-element laneq variant accepts INDEX 0..=7; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    // 16-bit-element laneq variant accepts INDEX 0..=7; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x16_t) -> poly8x16_t {
    // 8-bit-element laneq variant accepts INDEX 0..=3; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // 8-bit-element laneq variant accepts INDEX 0..=3; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x16_t) -> poly16x8_t {
    // 16-bit-element laneq variant accepts INDEX 0..=7; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x16_t) -> poly16x8_t {
    // 16-bit-element laneq variant accepts INDEX 0..=7; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s8<const INDEX: i32>(a: int8x8_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time range check; INDEX is then forwarded as a runtime i32.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v8i8"
        )]
        fn _vluti2_laneq_s8(a: int8x8_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti2_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s8<const INDEX: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time range check; INDEX is then forwarded as a runtime i32.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v16i8"
        )]
        fn _vluti2q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti2q_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s16<const INDEX: i32>(a: int16x4_t, b: uint8x16_t) -> int16x8_t {
    // 16-bit-element laneq variant accepts INDEX 0..=7; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v4i16"
        )]
        fn _vluti2_laneq_s16(a: int16x4_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti2_laneq_s16(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s16<const INDEX: i32>(a: int16x8_t, b: uint8x16_t) -> int16x8_t {
    // 16-bit-element laneq variant accepts INDEX 0..=7; checked at compile time.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v8i16"
        )]
        fn _vluti2q_laneq_s16(a: int16x8_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti2q_laneq_s16(a, b, INDEX)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    // Two-register table form; LANE 0..=1 checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Bit-identical layout: reinterpret the f16 table pair, delegate to the
    // signed form, reinterpret the result back.
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    // Two-register table form; LANE 0..=1 checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    // Two-register table form; LANE 0..=1 checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
13412#[doc = "Lookup table read with 4-bit indices"]
13413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13414#[doc = "## Safety"]
13415#[doc = " * Neon intrinsic unsafe"]
13416#[inline(always)]
13417#[target_feature(enable = "neon,lut")]
13418#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13419#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13420#[rustc_legacy_const_generics(2)]
13421pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13422 static_assert!(LANE >= 0 && LANE <= 1);
13423 unsafe extern "unadjusted" {
13424 #[cfg_attr(
13425 any(target_arch = "aarch64", target_arch = "arm64ec"),
13426 link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13427 )]
13428 fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13429 }
13430 _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13431}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Only lane 0 is valid for this 8-bit-element form; enforced at compile time.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti4q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Only lane 0 is valid for this 8-bit-element form; enforced at compile time.
    static_assert!(LANE == 0);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Only lane 0 is valid for this 8-bit-element form; enforced at compile time.
    static_assert!(LANE == 0);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    // laneq form with a 128-bit index vector: LANE 0..=3, checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Bit-identical layout: reinterpret the f16 table pair, delegate to the
    // signed form, reinterpret the result back.
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    // laneq form with a 128-bit index vector: LANE 0..=3, checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    // laneq form with a 128-bit index vector: LANE 0..=3, checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    // laneq form with a 128-bit index vector: LANE 0..=3, checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    // The paired table registers are passed as two separate vector arguments.
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // laneq 8-bit form accepts LANE 0..=1; checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM intrinsic, resolved via link_name on
    // AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti4q_laneq_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // laneq 8-bit form accepts LANE 0..=1; checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // laneq 8-bit form accepts LANE 0..=1; checked at compile time.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Bit-identical layout: reinterpret, delegate to the signed form,
    // reinterpret back.
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to the LLVM `fmax` intrinsic (the FMAX instruction per
    // assert_instr above), resolved via link_name on AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure register-to-register operation with no memory access;
    // availability is gated by the `neon` target feature on this function.
    unsafe { _vmax_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM `fmax` intrinsic for the 128-bit (2 x f64)
    // form, resolved via link_name on AArch64 / Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure register-to-register operation with no memory access;
    // availability is gated by the `neon` target feature on this function.
    unsafe { _vmaxq_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision maximum; bound directly to the LLVM `fmax.f16`
    // intrinsic. Whole function is compiled out on Arm64EC (cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: pure scalar operation with no memory access; availability is
    // gated by the `neon,fp16` target features on this function.
    unsafe { _vmaxh_f16(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lowered through the portable `simd_fmax` intrinsic; assert_instr above
    // verifies it selects the FMAXNM instruction on this target.
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lowered through the portable `simd_fmax` intrinsic; assert_instr above
    // verifies it selects the FMAXNM instruction on this target.
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar form implemented via `f16::max`; assert_instr above verifies it
    // compiles to the FMAXNM instruction. Not built on Arm64EC (cfg above).
    f16::max(a, b)
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    // Horizontal reduction via the portable `simd_reduce_max` intrinsic;
    // assert_instr above verifies it selects FMAXNMV.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    // Horizontal reduction via the portable `simd_reduce_max` intrinsic;
    // assert_instr above verifies it selects FMAXNMV.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    // Two-element reduction; assert_instr above verifies it selects the
    // pairwise FMAXNMP instruction rather than FMAXNMV.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    // Two-element reduction; assert_instr above verifies it selects the
    // pairwise FMAXNMP instruction.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    // Four-element horizontal reduction via `simd_reduce_max`; assert_instr
    // above verifies it selects FMAXNMV.
    unsafe { simd_reduce_max(a) }
}
// f16 across-vector max (FMAXV). Unlike the integer reductions below, these
// bind directly to the LLVM `llvm.aarch64.neon.fmaxv.*` intrinsics via an
// `extern "unadjusted"` import (a compiler-internal ABI used for declaring
// LLVM intrinsics). NOTE(review): presumably this is to get FMAXV's exact
// float-max semantics rather than `simd_reduce_max`'s — confirm against the
// stdarch generator spec.
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vmaxv_f16(a) }
}
// 8-lane f16 variant of the same LLVM intrinsic binding.
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxvq_f16(a) }
}
// f32/f64 across-vector max, bound to LLVM `llvm.aarch64.neon.fmaxv.*`.
// 2-lane shapes are expected to select the pairwise FMAXP; the 4-lane f32
// shape selects the across-vector FMAXV.
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxvq_f64(a) }
}
// Integer across-vector max reductions (signed: SMAXV, unsigned: UMAXV).
// All lower to rustc's `simd_reduce_max` SIMD intrinsic. The 2-lane s32/u32
// shapes expect the pairwise SMAXP/UMAXP instead of the across-vector form,
// since reducing two lanes is a single pairwise max.
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
// 2-lane: single pairwise SMAXP.
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
// 2-lane: single pairwise UMAXP.
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
// Element-wise float minimum (FMIN), bound to LLVM `llvm.aarch64.neon.fmin.*`
// via `extern "unadjusted"` (compiler-internal ABI for LLVM intrinsic imports).
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminq_f64(a, b) }
}
// Scalar f16 minimum: the same LLVM fmin intrinsic specialized to f16.
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminh_f16(a, b) }
}
// "Minimum number" (FMINNM) variants: lowered through rustc's `simd_fmin`
// SIMD intrinsic for the vector shapes, and `f16::min` for the scalar form.
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_fmin(a, b) }
}
// Scalar form: `f16::min` is expected to select FMINNM (see `assert_instr`).
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    f16::min(a, b)
}
// Horizontal "minimum number" (FMINNM-style) reductions, via rustc's
// `simd_reduce_min` SIMD intrinsic. As with the max family, 2-lane shapes
// expect the pairwise FMINNMP rather than the across-vector FMINNMV.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    unsafe { simd_reduce_min(a) }
}
// 2-lane: single pairwise FMINNMP.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    unsafe { simd_reduce_min(a) }
}
// 2-lane f64: single pairwise FMINNMP.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    unsafe { simd_reduce_min(a) }
}
// Float across-vector min, bound directly to the LLVM
// `llvm.aarch64.neon.fminv.*` intrinsics via `extern "unadjusted"` imports.
// NOTE(review): goes through the LLVM intrinsic rather than
// `simd_reduce_min`, presumably to get FMINV's exact float-min semantics —
// confirm against the stdarch generator spec. 2-lane shapes expect the
// pairwise FMINP.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminvq_f64(a) }
}
// Integer across-vector min reductions (signed: SMINV, unsigned: UMINV),
// via rustc's `simd_reduce_min` SIMD intrinsic. 2-lane s32/u32 shapes
// expect the pairwise SMINP/UMINP.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
// 2-lane: single pairwise SMINP.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
// 2-lane: single pairwise UMINP.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
// Float multiply-add: a + (b * c), expressed as an *unfused* multiply then
// add (so results are not FMA-rounded); the instruction test accordingly
// checks only for the FMUL.
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
// By-lane variants of the widening multiply-accumulate on the *high* halves
// (SMLAL2/UMLAL2). Each broadcasts lane `LANE` of `c` across every lane via
// `simd_shuffle!` and delegates to the corresponding `vmlal_high_*`.
// `static_assert_uimm_bits!` bounds `LANE` at compile time to the index
// width of `c` (2 bits for 4 lanes, 3 bits for 8, 1 bit for 2).
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            // Splat lane LANE of `c` into all 8 result lanes.
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
// By-scalar ("_n") variants: broadcast the scalar `c` to a full vector with
// `vdupq_n_*`, then delegate to the corresponding high-half widening
// multiply-accumulate.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
// Base high-half widening multiply-accumulate (SMLAL2/UMLAL2): extract the
// upper half of `b` and `c` with a `simd_shuffle!` (indices select the top
// lanes), then delegate to the plain non-high `vmlal_*`.
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Top 8 of 16 i8 lanes.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Top 4 of 8 i16 lanes.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Top 2 of 4 i32 lanes.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Top 8 of 16 u8 lanes.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
14532#[doc = "Unsigned multiply-add long"]
14533#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
14534#[inline(always)]
14535#[target_feature(enable = "neon")]
14536#[cfg_attr(test, assert_instr(umlal2))]
14537#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14538pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
14539 unsafe {
14540 let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14541 let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14542 vmlal_u16(a, b, c)
14543 }
14544}
14545#[doc = "Unsigned multiply-add long"]
14546#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
14547#[inline(always)]
14548#[target_feature(enable = "neon")]
14549#[cfg_attr(test, assert_instr(umlal2))]
14550#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14551pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
14552 unsafe {
14553 let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14554 let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
14555 vmlal_u32(a, b, c)
14556 }
14557}
14558#[doc = "Floating-point multiply-subtract from accumulator"]
14559#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
14560#[inline(always)]
14561#[target_feature(enable = "neon")]
14562#[cfg_attr(test, assert_instr(fmul))]
14563#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14564pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
14565 unsafe { simd_sub(a, simd_mul(b, c)) }
14566}
14567#[doc = "Floating-point multiply-subtract from accumulator"]
14568#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
14569#[inline(always)]
14570#[target_feature(enable = "neon")]
14571#[cfg_attr(test, assert_instr(fmul))]
14572#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14573pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
14574 unsafe { simd_sub(a, simd_mul(b, c)) }
14575}
14576#[doc = "Multiply-subtract long"]
14577#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
14578#[inline(always)]
14579#[target_feature(enable = "neon")]
14580#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14581#[rustc_legacy_const_generics(3)]
14582#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14583pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
14584 static_assert_uimm_bits!(LANE, 2);
14585 unsafe {
14586 vmlsl_high_s16(
14587 a,
14588 b,
14589 simd_shuffle!(
14590 c,
14591 c,
14592 [
14593 LANE as u32,
14594 LANE as u32,
14595 LANE as u32,
14596 LANE as u32,
14597 LANE as u32,
14598 LANE as u32,
14599 LANE as u32,
14600 LANE as u32
14601 ]
14602 ),
14603 )
14604 }
14605}
14606#[doc = "Multiply-subtract long"]
14607#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
14608#[inline(always)]
14609#[target_feature(enable = "neon")]
14610#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14611#[rustc_legacy_const_generics(3)]
14612#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14613pub fn vmlsl_high_laneq_s16<const LANE: i32>(
14614 a: int32x4_t,
14615 b: int16x8_t,
14616 c: int16x8_t,
14617) -> int32x4_t {
14618 static_assert_uimm_bits!(LANE, 3);
14619 unsafe {
14620 vmlsl_high_s16(
14621 a,
14622 b,
14623 simd_shuffle!(
14624 c,
14625 c,
14626 [
14627 LANE as u32,
14628 LANE as u32,
14629 LANE as u32,
14630 LANE as u32,
14631 LANE as u32,
14632 LANE as u32,
14633 LANE as u32,
14634 LANE as u32
14635 ]
14636 ),
14637 )
14638 }
14639}
14640#[doc = "Multiply-subtract long"]
14641#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
14642#[inline(always)]
14643#[target_feature(enable = "neon")]
14644#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14645#[rustc_legacy_const_generics(3)]
14646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14647pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
14648 static_assert_uimm_bits!(LANE, 1);
14649 unsafe {
14650 vmlsl_high_s32(
14651 a,
14652 b,
14653 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14654 )
14655 }
14656}
14657#[doc = "Multiply-subtract long"]
14658#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
14659#[inline(always)]
14660#[target_feature(enable = "neon")]
14661#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14662#[rustc_legacy_const_generics(3)]
14663#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14664pub fn vmlsl_high_laneq_s32<const LANE: i32>(
14665 a: int64x2_t,
14666 b: int32x4_t,
14667 c: int32x4_t,
14668) -> int64x2_t {
14669 static_assert_uimm_bits!(LANE, 2);
14670 unsafe {
14671 vmlsl_high_s32(
14672 a,
14673 b,
14674 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14675 )
14676 }
14677}
14678#[doc = "Multiply-subtract long"]
14679#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
14680#[inline(always)]
14681#[target_feature(enable = "neon")]
14682#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14683#[rustc_legacy_const_generics(3)]
14684#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14685pub fn vmlsl_high_lane_u16<const LANE: i32>(
14686 a: uint32x4_t,
14687 b: uint16x8_t,
14688 c: uint16x4_t,
14689) -> uint32x4_t {
14690 static_assert_uimm_bits!(LANE, 2);
14691 unsafe {
14692 vmlsl_high_u16(
14693 a,
14694 b,
14695 simd_shuffle!(
14696 c,
14697 c,
14698 [
14699 LANE as u32,
14700 LANE as u32,
14701 LANE as u32,
14702 LANE as u32,
14703 LANE as u32,
14704 LANE as u32,
14705 LANE as u32,
14706 LANE as u32
14707 ]
14708 ),
14709 )
14710 }
14711}
14712#[doc = "Multiply-subtract long"]
14713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
14714#[inline(always)]
14715#[target_feature(enable = "neon")]
14716#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14717#[rustc_legacy_const_generics(3)]
14718#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14719pub fn vmlsl_high_laneq_u16<const LANE: i32>(
14720 a: uint32x4_t,
14721 b: uint16x8_t,
14722 c: uint16x8_t,
14723) -> uint32x4_t {
14724 static_assert_uimm_bits!(LANE, 3);
14725 unsafe {
14726 vmlsl_high_u16(
14727 a,
14728 b,
14729 simd_shuffle!(
14730 c,
14731 c,
14732 [
14733 LANE as u32,
14734 LANE as u32,
14735 LANE as u32,
14736 LANE as u32,
14737 LANE as u32,
14738 LANE as u32,
14739 LANE as u32,
14740 LANE as u32
14741 ]
14742 ),
14743 )
14744 }
14745}
14746#[doc = "Multiply-subtract long"]
14747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
14748#[inline(always)]
14749#[target_feature(enable = "neon")]
14750#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14751#[rustc_legacy_const_generics(3)]
14752#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14753pub fn vmlsl_high_lane_u32<const LANE: i32>(
14754 a: uint64x2_t,
14755 b: uint32x4_t,
14756 c: uint32x2_t,
14757) -> uint64x2_t {
14758 static_assert_uimm_bits!(LANE, 1);
14759 unsafe {
14760 vmlsl_high_u32(
14761 a,
14762 b,
14763 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14764 )
14765 }
14766}
14767#[doc = "Multiply-subtract long"]
14768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
14769#[inline(always)]
14770#[target_feature(enable = "neon")]
14771#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14772#[rustc_legacy_const_generics(3)]
14773#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14774pub fn vmlsl_high_laneq_u32<const LANE: i32>(
14775 a: uint64x2_t,
14776 b: uint32x4_t,
14777 c: uint32x4_t,
14778) -> uint64x2_t {
14779 static_assert_uimm_bits!(LANE, 2);
14780 unsafe {
14781 vmlsl_high_u32(
14782 a,
14783 b,
14784 simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14785 )
14786 }
14787}
14788#[doc = "Multiply-subtract long"]
14789#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
14790#[inline(always)]
14791#[target_feature(enable = "neon")]
14792#[cfg_attr(test, assert_instr(smlsl2))]
14793#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14794pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
14795 vmlsl_high_s16(a, b, vdupq_n_s16(c))
14796}
14797#[doc = "Multiply-subtract long"]
14798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
14799#[inline(always)]
14800#[target_feature(enable = "neon")]
14801#[cfg_attr(test, assert_instr(smlsl2))]
14802#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14803pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
14804 vmlsl_high_s32(a, b, vdupq_n_s32(c))
14805}
14806#[doc = "Multiply-subtract long"]
14807#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
14808#[inline(always)]
14809#[target_feature(enable = "neon")]
14810#[cfg_attr(test, assert_instr(umlsl2))]
14811#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14812pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
14813 vmlsl_high_u16(a, b, vdupq_n_u16(c))
14814}
14815#[doc = "Multiply-subtract long"]
14816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
14817#[inline(always)]
14818#[target_feature(enable = "neon")]
14819#[cfg_attr(test, assert_instr(umlsl2))]
14820#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14821pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
14822 vmlsl_high_u32(a, b, vdupq_n_u32(c))
14823}
14824#[doc = "Signed multiply-subtract long"]
14825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
14826#[inline(always)]
14827#[target_feature(enable = "neon")]
14828#[cfg_attr(test, assert_instr(smlsl2))]
14829#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14830pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
14831 unsafe {
14832 let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14833 let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14834 vmlsl_s8(a, b, c)
14835 }
14836}
14837#[doc = "Signed multiply-subtract long"]
14838#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
14839#[inline(always)]
14840#[target_feature(enable = "neon")]
14841#[cfg_attr(test, assert_instr(smlsl2))]
14842#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14843pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
14844 unsafe {
14845 let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14846 let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14847 vmlsl_s16(a, b, c)
14848 }
14849}
14850#[doc = "Signed multiply-subtract long"]
14851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
14852#[inline(always)]
14853#[target_feature(enable = "neon")]
14854#[cfg_attr(test, assert_instr(smlsl2))]
14855#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14856pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
14857 unsafe {
14858 let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
14859 let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
14860 vmlsl_s32(a, b, c)
14861 }
14862}
14863#[doc = "Unsigned multiply-subtract long"]
14864#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
14865#[inline(always)]
14866#[target_feature(enable = "neon")]
14867#[cfg_attr(test, assert_instr(umlsl2))]
14868#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14869pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
14870 unsafe {
14871 let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14872 let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14873 vmlsl_u8(a, b, c)
14874 }
14875}
14876#[doc = "Unsigned multiply-subtract long"]
14877#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
14878#[inline(always)]
14879#[target_feature(enable = "neon")]
14880#[cfg_attr(test, assert_instr(umlsl2))]
14881#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14882pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
14883 unsafe {
14884 let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14885 let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14886 vmlsl_u16(a, b, c)
14887 }
14888}
14889#[doc = "Unsigned multiply-subtract long"]
14890#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
14891#[inline(always)]
14892#[target_feature(enable = "neon")]
14893#[cfg_attr(test, assert_instr(umlsl2))]
14894#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14895pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
14896 unsafe {
14897 let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14898 let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
14899 vmlsl_u32(a, b, c)
14900 }
14901}
14902#[doc = "Vector move"]
14903#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
14904#[inline(always)]
14905#[target_feature(enable = "neon")]
14906#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14907#[cfg_attr(test, assert_instr(sxtl2))]
14908pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
14909 unsafe {
14910 let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14911 vmovl_s8(a)
14912 }
14913}
14914#[doc = "Vector move"]
14915#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
14916#[inline(always)]
14917#[target_feature(enable = "neon")]
14918#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14919#[cfg_attr(test, assert_instr(sxtl2))]
14920pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
14921 unsafe {
14922 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14923 vmovl_s16(a)
14924 }
14925}
14926#[doc = "Vector move"]
14927#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
14928#[inline(always)]
14929#[target_feature(enable = "neon")]
14930#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14931#[cfg_attr(test, assert_instr(sxtl2))]
14932pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
14933 unsafe {
14934 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
14935 vmovl_s32(a)
14936 }
14937}
14938#[doc = "Vector move"]
14939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
14940#[inline(always)]
14941#[target_feature(enable = "neon")]
14942#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14943#[cfg_attr(test, assert_instr(uxtl2))]
14944pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
14945 unsafe {
14946 let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14947 vmovl_u8(a)
14948 }
14949}
14950#[doc = "Vector move"]
14951#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
14952#[inline(always)]
14953#[target_feature(enable = "neon")]
14954#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14955#[cfg_attr(test, assert_instr(uxtl2))]
14956pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
14957 unsafe {
14958 let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14959 vmovl_u16(a)
14960 }
14961}
14962#[doc = "Vector move"]
14963#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
14964#[inline(always)]
14965#[target_feature(enable = "neon")]
14966#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14967#[cfg_attr(test, assert_instr(uxtl2))]
14968pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
14969 unsafe {
14970 let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
14971 vmovl_u32(a)
14972 }
14973}
14974#[doc = "Extract narrow"]
14975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
14976#[inline(always)]
14977#[target_feature(enable = "neon")]
14978#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14979#[cfg_attr(test, assert_instr(xtn2))]
14980pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
14981 unsafe {
14982 let c: int8x8_t = simd_cast(b);
14983 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14984 }
14985}
14986#[doc = "Extract narrow"]
14987#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
14988#[inline(always)]
14989#[target_feature(enable = "neon")]
14990#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14991#[cfg_attr(test, assert_instr(xtn2))]
14992pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
14993 unsafe {
14994 let c: int16x4_t = simd_cast(b);
14995 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14996 }
14997}
14998#[doc = "Extract narrow"]
14999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
15000#[inline(always)]
15001#[target_feature(enable = "neon")]
15002#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15003#[cfg_attr(test, assert_instr(xtn2))]
15004pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
15005 unsafe {
15006 let c: int32x2_t = simd_cast(b);
15007 simd_shuffle!(a, c, [0, 1, 2, 3])
15008 }
15009}
15010#[doc = "Extract narrow"]
15011#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
15012#[inline(always)]
15013#[target_feature(enable = "neon")]
15014#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15015#[cfg_attr(test, assert_instr(xtn2))]
15016pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
15017 unsafe {
15018 let c: uint8x8_t = simd_cast(b);
15019 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
15020 }
15021}
15022#[doc = "Extract narrow"]
15023#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
15024#[inline(always)]
15025#[target_feature(enable = "neon")]
15026#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15027#[cfg_attr(test, assert_instr(xtn2))]
15028pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
15029 unsafe {
15030 let c: uint16x4_t = simd_cast(b);
15031 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
15032 }
15033}
15034#[doc = "Extract narrow"]
15035#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
15036#[inline(always)]
15037#[target_feature(enable = "neon")]
15038#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15039#[cfg_attr(test, assert_instr(xtn2))]
15040pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
15041 unsafe {
15042 let c: uint32x2_t = simd_cast(b);
15043 simd_shuffle!(a, c, [0, 1, 2, 3])
15044 }
15045}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Element-wise product of the two one-lane vectors.
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Element-wise product of the two two-lane vectors.
    unsafe { simd_mul(a, b) }
}
15064#[doc = "Floating-point multiply"]
15065#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
15066#[inline(always)]
15067#[target_feature(enable = "neon")]
15068#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15069#[rustc_legacy_const_generics(2)]
15070#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15071pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
15072 static_assert!(LANE == 0);
15073 unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
15074}
15075#[doc = "Floating-point multiply"]
15076#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
15077#[inline(always)]
15078#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15079#[rustc_legacy_const_generics(2)]
15080#[target_feature(enable = "neon,fp16")]
15081#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
15082#[cfg(not(target_arch = "arm64ec"))]
15083pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
15084 static_assert_uimm_bits!(LANE, 3);
15085 unsafe {
15086 simd_mul(
15087 a,
15088 simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15089 )
15090 }
15091}
15092#[doc = "Floating-point multiply"]
15093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
15094#[inline(always)]
15095#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15096#[rustc_legacy_const_generics(2)]
15097#[target_feature(enable = "neon,fp16")]
15098#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
15099#[cfg(not(target_arch = "arm64ec"))]
15100pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15101 static_assert_uimm_bits!(LANE, 3);
15102 unsafe {
15103 simd_mul(
15104 a,
15105 simd_shuffle!(
15106 b,
15107 b,
15108 [
15109 LANE as u32,
15110 LANE as u32,
15111 LANE as u32,
15112 LANE as u32,
15113 LANE as u32,
15114 LANE as u32,
15115 LANE as u32,
15116 LANE as u32
15117 ]
15118 ),
15119 )
15120 }
15121}
15122#[doc = "Floating-point multiply"]
15123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
15124#[inline(always)]
15125#[target_feature(enable = "neon")]
15126#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15127#[rustc_legacy_const_generics(2)]
15128#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15129pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
15130 static_assert_uimm_bits!(LANE, 1);
15131 unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
15132}
15133#[doc = "Vector multiply by scalar"]
15134#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
15135#[inline(always)]
15136#[target_feature(enable = "neon")]
15137#[cfg_attr(test, assert_instr(fmul))]
15138#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15139pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
15140 unsafe { simd_mul(a, vdup_n_f64(b)) }
15141}
15142#[doc = "Vector multiply by scalar"]
15143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
15144#[inline(always)]
15145#[target_feature(enable = "neon")]
15146#[cfg_attr(test, assert_instr(fmul))]
15147#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15148pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
15149 unsafe { simd_mul(a, vdupq_n_f64(b)) }
15150}
15151#[doc = "Floating-point multiply"]
15152#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
15153#[inline(always)]
15154#[target_feature(enable = "neon")]
15155#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15156#[rustc_legacy_const_generics(2)]
15157#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15158pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
15159 static_assert!(LANE == 0);
15160 unsafe {
15161 let b: f64 = simd_extract!(b, LANE as u32);
15162 a * b
15163 }
15164}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision product; the summary previously said "Add", but the
    // body multiplies and the expected instruction is `fmul`.
    a * b
}
15175#[doc = "Floating-point multiply"]
15176#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
15177#[inline(always)]
15178#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15179#[rustc_legacy_const_generics(2)]
15180#[target_feature(enable = "neon,fp16")]
15181#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15182#[cfg(not(target_arch = "arm64ec"))]
15183pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
15184 static_assert_uimm_bits!(LANE, 2);
15185 unsafe {
15186 let b: f16 = simd_extract!(b, LANE as u32);
15187 a * b
15188 }
15189}
15190#[doc = "Floating-point multiply"]
15191#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
15192#[inline(always)]
15193#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15194#[rustc_legacy_const_generics(2)]
15195#[target_feature(enable = "neon,fp16")]
15196#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15197#[cfg(not(target_arch = "arm64ec"))]
15198pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
15199 static_assert_uimm_bits!(LANE, 3);
15200 unsafe {
15201 let b: f16 = simd_extract!(b, LANE as u32);
15202 a * b
15203 }
15204}
15205#[doc = "Multiply long"]
15206#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
15207#[inline(always)]
15208#[target_feature(enable = "neon")]
15209#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15210#[rustc_legacy_const_generics(2)]
15211#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15212pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
15213 static_assert_uimm_bits!(LANE, 2);
15214 unsafe {
15215 vmull_high_s16(
15216 a,
15217 simd_shuffle!(
15218 b,
15219 b,
15220 [
15221 LANE as u32,
15222 LANE as u32,
15223 LANE as u32,
15224 LANE as u32,
15225 LANE as u32,
15226 LANE as u32,
15227 LANE as u32,
15228 LANE as u32
15229 ]
15230 ),
15231 )
15232 }
15233}
15234#[doc = "Multiply long"]
15235#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
15236#[inline(always)]
15237#[target_feature(enable = "neon")]
15238#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15239#[rustc_legacy_const_generics(2)]
15240#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15241pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
15242 static_assert_uimm_bits!(LANE, 3);
15243 unsafe {
15244 vmull_high_s16(
15245 a,
15246 simd_shuffle!(
15247 b,
15248 b,
15249 [
15250 LANE as u32,
15251 LANE as u32,
15252 LANE as u32,
15253 LANE as u32,
15254 LANE as u32,
15255 LANE as u32,
15256 LANE as u32,
15257 LANE as u32
15258 ]
15259 ),
15260 )
15261 }
15262}
15263#[doc = "Multiply long"]
15264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
15265#[inline(always)]
15266#[target_feature(enable = "neon")]
15267#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15268#[rustc_legacy_const_generics(2)]
15269#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15270pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
15271 static_assert_uimm_bits!(LANE, 1);
15272 unsafe {
15273 vmull_high_s32(
15274 a,
15275 simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15276 )
15277 }
15278}
15279#[doc = "Multiply long"]
15280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
15281#[inline(always)]
15282#[target_feature(enable = "neon")]
15283#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15284#[rustc_legacy_const_generics(2)]
15285#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15286pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
15287 static_assert_uimm_bits!(LANE, 2);
15288 unsafe {
15289 vmull_high_s32(
15290 a,
15291 simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15292 )
15293 }
15294}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` across 8 lanes, then widening-multiply
        // against the upper half of `a` (UMULL2).
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` across 8 lanes, then widening-multiply
        // against the upper half of `a` (UMULL2).
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `b` across 4 lanes, then widening-multiply
        // against the upper half of `a` (UMULL2).
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` across 4 lanes, then widening-multiply
        // against the upper half of `a` (UMULL2).
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Duplicate the scalar `b` into every lane, then widening-multiply with
    // the upper half of `a`.
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Duplicate the scalar `b` into every lane, then widening-multiply with
    // the upper half of `a`.
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    // Duplicate the scalar `b` into every lane, then widening-multiply with
    // the upper half of `a`.
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    // Duplicate the scalar `b` into every lane, then widening-multiply with
    // the upper half of `a`.
    vmull_high_u32(a, vdupq_n_u32(b))
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    // Polynomial-multiply the top (index 1) 64-bit lanes of `a` and `b`,
    // producing the full 128-bit carry-less product (PMULL2).
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    unsafe {
        // Select the upper 8 lanes of each input, then delegate to the
        // 64-bit polynomial multiply-long (PMULL2).
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper 8 lanes of each input, then delegate to the
        // 64-bit widening multiply (SMULL2).
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper 4 lanes of each input, then delegate to the
        // 64-bit widening multiply (SMULL2).
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper 2 lanes of each input, then delegate to the
        // 64-bit widening multiply (SMULL2).
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the upper 8 lanes of each input, then delegate to the
        // 64-bit widening multiply (UMULL2).
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the upper 4 lanes of each input, then delegate to the
        // 64-bit widening multiply (UMULL2).
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the upper 2 lanes of each input, then delegate to the
        // 64-bit widening multiply (UMULL2).
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        // The LLVM intrinsic yields the 128-bit product as a byte vector.
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    // Reinterpret the 16-byte vector result as the scalar p128 value.
    unsafe { transmute(_vmull_p64(a, b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast that lane across both result lanes and multiply elementwise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` across both lanes and multiply elementwise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Scalar multiply of `a` with the selected lane of `b`.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Scalar multiply of `a` with the selected lane of `b`.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Scalar multiply of `a` with the selected lane of `b`.
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Thin wrapper: forwards both operands to the LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Thin wrapper: forwards both operands to the LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Thin wrapper: forwards both operands to the LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Thin wrapper: forwards both operands to the LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Thin wrapper: forwards both operands to the LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper: forwards both operands to the LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` across 4 lanes, then FMULX elementwise.
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` into a 4-lane vector, then FMULX.
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` into an 8-lane vector, then FMULX.
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes, then FMULX.
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` across both lanes, then FMULX elementwise.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b` into a 2-lane vector, then FMULX elementwise.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `b` into a 4-lane vector, then FMULX.
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` across all 4 lanes, then FMULX.
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` across both lanes, then FMULX elementwise.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Extract the lane as f64 and reinterpret it as a 1-lane vector for FMULX.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the lane as f64 and reinterpret it as a 1-lane vector for FMULX.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
// NOTE(review): still `unstable` while sibling fp16 vmulx intrinsics above are
// `stable` — presumably intentional staging; confirm against the tracking issue.
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    // Duplicate the scalar `b` into every lane, then FMULX elementwise.
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    // Duplicate the scalar `b` into every lane, then FMULX elementwise.
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    // Thin wrapper: forwards to the scalar LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    // Thin wrapper: forwards to the scalar LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // `b` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Scalar FMULX of `a` with the selected lane of `b`.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Scalar FMULX of `a` with the selected lane of `b`.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Scalar FMULX of `a` with the selected lane of `b`.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Scalar FMULX of `a` with the selected lane of `b`.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    // Thin wrapper: forwards to the scalar LLVM FMULX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Scalar FMULX of `a` with the selected lane of `b`.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Scalar FMULX of `a` with the selected lane of `b`.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast that lane into a 2-lane vector, then FMULX elementwise.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point negation via the platform SIMD intrinsic.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point negation via the platform SIMD intrinsic.
    unsafe { simd_neg(a) }
}
16046#[doc = "Negate"]
16047#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
16048#[inline(always)]
16049#[target_feature(enable = "neon")]
16050#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16051#[cfg_attr(test, assert_instr(neg))]
16052pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
16053 unsafe { simd_neg(a) }
16054}
16055#[doc = "Negate"]
16056#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
16057#[inline(always)]
16058#[target_feature(enable = "neon")]
16059#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16060#[cfg_attr(test, assert_instr(neg))]
16061pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
16062 unsafe { simd_neg(a) }
16063}
16064#[doc = "Negate"]
16065#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
16066#[inline(always)]
16067#[target_feature(enable = "neon")]
16068#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16069#[cfg_attr(test, assert_instr(neg))]
16070pub fn vnegd_s64(a: i64) -> i64 {
16071 a.wrapping_neg()
16072}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    // Plain scalar negation; the assert above checks it lowers to FNEG.
    -a
}
16083#[doc = "Floating-point add pairwise"]
16084#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
16085#[inline(always)]
16086#[target_feature(enable = "neon")]
16087#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16088#[cfg_attr(test, assert_instr(nop))]
16089pub fn vpaddd_f64(a: float64x2_t) -> f64 {
16090 unsafe {
16091 let a1: f64 = simd_extract!(a, 0);
16092 let a2: f64 = simd_extract!(a, 1);
16093 a1 + a2
16094 }
16095}
16096#[doc = "Floating-point add pairwise"]
16097#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
16098#[inline(always)]
16099#[target_feature(enable = "neon")]
16100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16101#[cfg_attr(test, assert_instr(nop))]
16102pub fn vpadds_f32(a: float32x2_t) -> f32 {
16103 unsafe {
16104 let a1: f32 = simd_extract!(a, 0);
16105 let a2: f32 = simd_extract!(a, 1);
16106 a1 + a2
16107 }
16108}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    // Ordered reduction over the two lanes (lane 0 + lane 1), seeded with 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    // Ordered reduction over the two lanes (lane 0 + lane 1), seeded with 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // FFI declaration of the LLVM intrinsic backing FADDP for v8f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v8f16"
        )]
        fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target features are enabled on this function.
    unsafe { _vpaddq_f16(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI declaration of the LLVM intrinsic backing FADDP for v4f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v4f32"
        )]
        fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpaddq_f32(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI declaration of the LLVM intrinsic backing FADDP for v2f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v2f64"
        )]
        fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpaddq_f64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // FFI declaration of the LLVM intrinsic backing ADDP for v16i8.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v16i8"
        )]
        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpaddq_s8(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // FFI declaration of the LLVM intrinsic backing ADDP for v8i16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v8i16"
        )]
        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpaddq_s16(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // FFI declaration of the LLVM intrinsic backing ADDP for v4i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v4i32"
        )]
        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpaddq_s32(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // FFI declaration of the LLVM intrinsic backing ADDP for v2i64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v2i64"
        )]
        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpaddq_s64(a, b) }
}
16240#[doc = "Add Pairwise"]
16241#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
16242#[inline(always)]
16243#[cfg(target_endian = "little")]
16244#[target_feature(enable = "neon")]
16245#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16246#[cfg_attr(test, assert_instr(addp))]
16247pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16248 unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
16249}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian: lanes are stored in reversed order, so flip both inputs,
    // run the signed intrinsic on the reinterpreted bits, and flip the
    // result back.
    unsafe {
        let a_rev: uint8x16_t =
            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
        let b_rev: uint8x16_t =
            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
        let sum: uint8x16_t = transmute(vpaddq_s8(transmute(a_rev), transmute(b_rev)));
        simd_shuffle!(
            sum,
            sum,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
16271#[doc = "Add Pairwise"]
16272#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
16273#[inline(always)]
16274#[cfg(target_endian = "little")]
16275#[target_feature(enable = "neon")]
16276#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16277#[cfg_attr(test, assert_instr(addp))]
16278pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16279 unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
16280}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Big-endian: lanes are stored in reversed order, so flip both inputs,
    // run the signed intrinsic on the reinterpreted bits, and flip the
    // result back.
    unsafe {
        let a_rev: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
        let b_rev: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
        let sum: uint16x8_t = transmute(vpaddq_s16(transmute(a_rev), transmute(b_rev)));
        simd_shuffle!(sum, sum, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
16296#[doc = "Add Pairwise"]
16297#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
16298#[inline(always)]
16299#[cfg(target_endian = "little")]
16300#[target_feature(enable = "neon")]
16301#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16302#[cfg_attr(test, assert_instr(addp))]
16303pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16304 unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
16305}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Big-endian: lanes are stored in reversed order, so flip both inputs,
    // run the signed intrinsic on the reinterpreted bits, and flip the
    // result back.
    unsafe {
        let a_rev: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
        let b_rev: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
        let sum: uint32x4_t = transmute(vpaddq_s32(transmute(a_rev), transmute(b_rev)));
        simd_shuffle!(sum, sum, [3, 2, 1, 0])
    }
}
16321#[doc = "Add Pairwise"]
16322#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
16323#[inline(always)]
16324#[cfg(target_endian = "little")]
16325#[target_feature(enable = "neon")]
16326#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16327#[cfg_attr(test, assert_instr(addp))]
16328pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
16329 unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
16330}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Big-endian: lanes are stored in reversed order, so flip both inputs,
    // run the signed intrinsic on the reinterpreted bits, and flip the
    // result back.
    unsafe {
        let a_rev: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
        let b_rev: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
        let sum: uint64x2_t = transmute(vpaddq_s64(transmute(a_rev), transmute(b_rev)));
        simd_shuffle!(sum, sum, [1, 0])
    }
}
16346#[doc = "Floating-point add pairwise"]
16347#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
16348#[inline(always)]
16349#[target_feature(enable = "neon,fp16")]
16350#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16351#[cfg(not(target_arch = "arm64ec"))]
16352#[cfg_attr(test, assert_instr(fmaxp))]
16353pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16354 unsafe extern "unadjusted" {
16355 #[cfg_attr(
16356 any(target_arch = "aarch64", target_arch = "arm64ec"),
16357 link_name = "llvm.aarch64.neon.fmaxp.v4f16"
16358 )]
16359 fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16360 }
16361 unsafe { _vpmax_f16(a, b) }
16362}
16363#[doc = "Floating-point add pairwise"]
16364#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
16365#[inline(always)]
16366#[target_feature(enable = "neon,fp16")]
16367#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16368#[cfg(not(target_arch = "arm64ec"))]
16369#[cfg_attr(test, assert_instr(fmaxp))]
16370pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16371 unsafe extern "unadjusted" {
16372 #[cfg_attr(
16373 any(target_arch = "aarch64", target_arch = "arm64ec"),
16374 link_name = "llvm.aarch64.neon.fmaxp.v8f16"
16375 )]
16376 fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16377 }
16378 unsafe { _vpmaxq_f16(a, b) }
16379}
16380#[doc = "Floating-point add pairwise"]
16381#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
16382#[inline(always)]
16383#[target_feature(enable = "neon,fp16")]
16384#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16385#[cfg(not(target_arch = "arm64ec"))]
16386#[cfg_attr(test, assert_instr(fmaxnmp))]
16387pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16388 unsafe extern "unadjusted" {
16389 #[cfg_attr(
16390 any(target_arch = "aarch64", target_arch = "arm64ec"),
16391 link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
16392 )]
16393 fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16394 }
16395 unsafe { _vpmaxnm_f16(a, b) }
16396}
16397#[doc = "Floating-point add pairwise"]
16398#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
16399#[inline(always)]
16400#[target_feature(enable = "neon,fp16")]
16401#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16402#[cfg(not(target_arch = "arm64ec"))]
16403#[cfg_attr(test, assert_instr(fmaxnmp))]
16404pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16405 unsafe extern "unadjusted" {
16406 #[cfg_attr(
16407 any(target_arch = "aarch64", target_arch = "arm64ec"),
16408 link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
16409 )]
16410 fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16411 }
16412 unsafe { _vpmaxnmq_f16(a, b) }
16413}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // FFI declaration of the LLVM intrinsic backing FMAXNMP for v2f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI declaration of the LLVM intrinsic backing FMAXNMP for v4f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI declaration of the LLVM intrinsic backing FMAXNMP for v2f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    // FFI declaration of the LLVM fmaxnmv reduction over v2f64, which
    // collapses the two lanes to a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    // FFI declaration of the LLVM fmaxnmv reduction over v2f32, which
    // collapses the two lanes to a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxnms_f32(a) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI declaration of the LLVM intrinsic backing FMAXP for v4f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI declaration of the LLVM intrinsic backing FMAXP for v2f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // FFI declaration of the LLVM intrinsic backing SMAXP for v16i8.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // FFI declaration of the LLVM intrinsic backing SMAXP for v8i16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // FFI declaration of the LLVM intrinsic backing SMAXP for v4i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // FFI declaration of the LLVM intrinsic backing UMAXP for v16i8.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // FFI declaration of the LLVM intrinsic backing UMAXP for v8i16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // FFI declaration of the LLVM intrinsic backing UMAXP for v4i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    // FFI declaration of the LLVM fmaxv reduction over v2f64, which
    // collapses the two lanes to a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    // FFI declaration of the LLVM fmaxv reduction over v2f32, which
    // collapses the two lanes to a scalar.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpmaxs_f32(a) }
}
16654#[doc = "Floating-point add pairwise"]
16655#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
16656#[inline(always)]
16657#[target_feature(enable = "neon,fp16")]
16658#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16659#[cfg(not(target_arch = "arm64ec"))]
16660#[cfg_attr(test, assert_instr(fminp))]
16661pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16662 unsafe extern "unadjusted" {
16663 #[cfg_attr(
16664 any(target_arch = "aarch64", target_arch = "arm64ec"),
16665 link_name = "llvm.aarch64.neon.fminp.v4f16"
16666 )]
16667 fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16668 }
16669 unsafe { _vpmin_f16(a, b) }
16670}
16671#[doc = "Floating-point add pairwise"]
16672#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
16673#[inline(always)]
16674#[target_feature(enable = "neon,fp16")]
16675#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16676#[cfg(not(target_arch = "arm64ec"))]
16677#[cfg_attr(test, assert_instr(fminp))]
16678pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16679 unsafe extern "unadjusted" {
16680 #[cfg_attr(
16681 any(target_arch = "aarch64", target_arch = "arm64ec"),
16682 link_name = "llvm.aarch64.neon.fminp.v8f16"
16683 )]
16684 fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16685 }
16686 unsafe { _vpminq_f16(a, b) }
16687}
16688#[doc = "Floating-point add pairwise"]
16689#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
16690#[inline(always)]
16691#[target_feature(enable = "neon,fp16")]
16692#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16693#[cfg(not(target_arch = "arm64ec"))]
16694#[cfg_attr(test, assert_instr(fminnmp))]
16695pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16696 unsafe extern "unadjusted" {
16697 #[cfg_attr(
16698 any(target_arch = "aarch64", target_arch = "arm64ec"),
16699 link_name = "llvm.aarch64.neon.fminnmp.v4f16"
16700 )]
16701 fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16702 }
16703 unsafe { _vpminnm_f16(a, b) }
16704}
16705#[doc = "Floating-point add pairwise"]
16706#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
16707#[inline(always)]
16708#[target_feature(enable = "neon,fp16")]
16709#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
16710#[cfg(not(target_arch = "arm64ec"))]
16711#[cfg_attr(test, assert_instr(fminnmp))]
16712pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16713 unsafe extern "unadjusted" {
16714 #[cfg_attr(
16715 any(target_arch = "aarch64", target_arch = "arm64ec"),
16716 link_name = "llvm.aarch64.neon.fminnmp.v8f16"
16717 )]
16718 fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16719 }
16720 unsafe { _vpminnmq_f16(a, b) }
16721}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // FFI declaration of the LLVM intrinsic backing FMINNMP for v2f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI declaration of the LLVM intrinsic backing FMINNMP for v4f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI declaration of the LLVM intrinsic backing FMINNMP for v2f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: argument/return types match the declaration above; the required
    // target feature is enabled on this function.
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    // Binds the across-lanes reduction intrinsic (`fminnmv`), which on a
    // two-lane vector is the pairwise minimum of the two lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    // Binds the across-lanes reduction intrinsic (`fminnmv`), which on a
    // two-lane vector is the pairwise minimum of the two lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Private binding for the LLVM intrinsic that lowers to `fminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Private binding for the LLVM intrinsic that lowers to `fminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Private binding for the LLVM intrinsic that lowers to `sminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Private binding for the LLVM intrinsic that lowers to `sminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Private binding for the LLVM intrinsic that lowers to `sminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Private binding for the LLVM intrinsic that lowers to `uminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Private binding for the LLVM intrinsic that lowers to `uminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Private binding for the LLVM intrinsic that lowers to `uminp`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    // Binds the across-lanes reduction intrinsic (`fminv`), which on a
    // two-lane vector is the pairwise minimum of the two lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    // Binds the across-lanes reduction intrinsic (`fminv`), which on a
    // two-lane vector is the pairwise minimum of the two lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vpmins_f32(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    // Private binding for the LLVM intrinsic that lowers to `sqabs`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    // Private binding for the LLVM intrinsic that lowers to `sqabs`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    // Broadcast the scalar, apply the vector saturating abs, read back lane 0.
    // SAFETY: lane index 0 is always in bounds for the vector built here.
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    // Broadcast the scalar, apply the vector saturating abs, read back lane 0.
    // SAFETY: lane index 0 is always in bounds for the vector built here.
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    // Scalar form of the intrinsic (`.i32` suffix) — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: the intrinsic accepts any input value; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    // Scalar form of the intrinsic (`.i64` suffix) — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: the intrinsic accepts any input value; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqabsd_s64(a) }
}
17044#[doc = "Saturating add"]
17045#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
17046#[inline(always)]
17047#[target_feature(enable = "neon")]
17048#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17049#[cfg_attr(test, assert_instr(sqadd))]
17050pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
17051 let a: int8x8_t = vdup_n_s8(a);
17052 let b: int8x8_t = vdup_n_s8(b);
17053 unsafe { simd_extract!(vqadd_s8(a, b), 0) }
17054}
17055#[doc = "Saturating add"]
17056#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
17057#[inline(always)]
17058#[target_feature(enable = "neon")]
17059#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17060#[cfg_attr(test, assert_instr(sqadd))]
17061pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
17062 let a: int16x4_t = vdup_n_s16(a);
17063 let b: int16x4_t = vdup_n_s16(b);
17064 unsafe { simd_extract!(vqadd_s16(a, b), 0) }
17065}
17066#[doc = "Saturating add"]
17067#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
17068#[inline(always)]
17069#[target_feature(enable = "neon")]
17070#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17071#[cfg_attr(test, assert_instr(uqadd))]
17072pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
17073 let a: uint8x8_t = vdup_n_u8(a);
17074 let b: uint8x8_t = vdup_n_u8(b);
17075 unsafe { simd_extract!(vqadd_u8(a, b), 0) }
17076}
17077#[doc = "Saturating add"]
17078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
17079#[inline(always)]
17080#[target_feature(enable = "neon")]
17081#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17082#[cfg_attr(test, assert_instr(uqadd))]
17083pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
17084 let a: uint16x4_t = vdup_n_u16(a);
17085 let b: uint16x4_t = vdup_n_u16(b);
17086 unsafe { simd_extract!(vqadd_u16(a, b), 0) }
17087}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Scalar form of the intrinsic (`.i32` suffix) — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Scalar form of the intrinsic (`.i64` suffix) — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Scalar form of the unsigned intrinsic — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Scalar form of the unsigned intrinsic — no vector round trip needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: the intrinsic accepts any input values; the only requirement is
    // the `neon` feature, guaranteed by the `#[target_feature]` gate above.
    unsafe { _vqaddd_u64(a, b) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N selects a lane of the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Doubling multiply-long of the selected lane, then saturating accumulate into `a`.
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N selects a lane of the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Doubling multiply-long of the selected lane, then saturating accumulate into `a`.
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N selects a lane of the 2-lane `c`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // Doubling multiply-long of the selected lane, then saturating accumulate into `a`.
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Doubling multiply-long of the selected lane, then saturating accumulate into `a`.
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Doubling multiply-long by the scalar `c`, then saturating accumulate into `a`.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubling multiply-long of the high halves, then saturating accumulate into `a`.
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Doubling multiply-long by the scalar `c`, then saturating accumulate into `a`.
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubling multiply-long of the high halves, then saturating accumulate into `a`.
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N selects a lane of the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Doubling multiply-long of the selected lane, then saturating accumulate into `a`.
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Doubling multiply-long of the selected lane, then saturating accumulate into `a`.
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the static assert above guarantees LANE is in bounds for `c`.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the static assert above guarantees LANE is in bounds for `c`.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE indexes the 2-lane `c`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the static assert above guarantees LANE is in bounds for `c`.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the static assert above guarantees LANE is in bounds for `c`.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Widen b*c with the vector doubling multiply-long on broadcast inputs...
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // ...then saturating-add lane 0 of that product into `a`.
    // SAFETY: lane index 0 is always in bounds for the 4-lane vector `x`.
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
17308#[doc = "Signed saturating doubling multiply-add long"]
17309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
17310#[inline(always)]
17311#[target_feature(enable = "neon")]
17312#[cfg_attr(test, assert_instr(sqdmlal))]
17313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17314pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
17315 let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
17316 x
17317}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N selects a lane of the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Doubling multiply-long of the selected lane, then saturating subtract from `a`.
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N selects a lane of the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Doubling multiply-long of the selected lane, then saturating subtract from `a`.
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N selects a lane of the 2-lane `c`, so it must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // Doubling multiply-long of the selected lane, then saturating subtract from `a`.
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Doubling multiply-long of the selected lane, then saturating subtract from `a`.
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Doubling multiply-long by the scalar `c`, then saturating subtract from `a`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubling multiply-long of the high halves, then saturating subtract from `a`.
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Doubling multiply-long by the scalar `c`, then saturating subtract from `a`.
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubling multiply-long of the high halves, then saturating subtract from `a`.
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N selects a lane of the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Doubling multiply-long of the selected lane, then saturating subtract from `a`.
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N selects a lane of the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // Doubling multiply-long of the selected lane, then saturating subtract from `a`.
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE indexes the 4-lane `c`, so it must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the static assert above guarantees LANE is in bounds for `c`.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE indexes the 8-lane `c`, so it must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the static assert above guarantees LANE is in bounds for `c`.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE indexes one of the 2 lanes of `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Pull the selected lane out as a scalar and defer to the all-scalar form.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE indexes one of the 4 lanes of the 128-bit `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Pull the selected lane out as a scalar and defer to the all-scalar form.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // No scalar LLVM intrinsic exists: splat both scalars into vectors,
    // do the widening doubling multiply, then use lane 0 of the product.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // a - sat(2*b*c) with scalar saturating subtraction.
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    // a - sat(2*b*c): scalar widening doubling multiply, then saturating subtract.
    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // LANE indexes one of the 4 lanes of `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the chosen lane of `b` across a vector and reuse the vector form.
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // LANE indexes one of the 4 lanes of `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    // Splat the chosen lane of `b` across a q-register and reuse the vector form.
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE indexes one of the 2 lanes of `b`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the chosen lane of `b` across a vector and reuse the vector form.
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // LANE indexes one of the 2 lanes of `b`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Splat the chosen lane of `b` across a q-register and reuse the vector form.
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    // N indexes one of the 4 lanes of `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    // N indexes one of the 8 lanes of the 128-bit `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // No scalar LLVM intrinsic: splat both operands into vectors,
    // run the vector sqdmulh, and take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // No scalar LLVM intrinsic: splat both operands into vectors,
    // run the vector sqdmulh, and take lane 0 of the result.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    // N indexes one of the 2 lanes of `b`, so it must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    // N indexes one of the 4 lanes of the 128-bit `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // N indexes one of the 4 lanes of `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Keep only the upper half (lanes 4..8) of `a` ...
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // ... splat lane N of `b` across all 4 lanes ...
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // N indexes one of the 4 lanes of the 128-bit `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Keep only the upper half (lanes 2..4) of `a` ...
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // ... splat lane N of `b` across both lanes ...
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // N indexes one of the 2 lanes of `b`, so it must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Keep only the upper half (lanes 2..4) of `a` ...
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // ... splat lane N of `b` across both lanes ...
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // N indexes one of the 8 lanes of the 128-bit `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Keep only the upper half (lanes 4..8) of `a` ...
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // ... splat lane N of `b` across all 4 lanes ...
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        // Keep only the upper half (lanes 4..8) of `a` ...
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // ... splat the scalar `b` across all 4 lanes ...
        let b: int16x4_t = vdup_n_s16(b);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        // Keep only the upper half (lanes 2..4) of `a` ...
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // ... splat the scalar `b` across both lanes ...
        let b: int32x2_t = vdup_n_s32(b);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Keep only the upper halves (lanes 4..8) of both inputs ...
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Keep only the upper halves (lanes 2..4) of both inputs ...
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        // ... and reuse the 64-bit-wide widening multiply.
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // N indexes one of the 8 lanes of the 128-bit `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Splat lane N of `b` across all 4 lanes, then widen-multiply.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // N indexes one of the 4 lanes of the 128-bit `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Splat lane N of `b` across both lanes, then widen-multiply.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // N indexes one of the 4 lanes of `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // N indexes one of the 4 lanes of the 128-bit `b`, so it must fit in 2 bits.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // N indexes one of the 8 lanes of the 128-bit `b`, so it must fit in 3 bits.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // No scalar LLVM intrinsic for the i16 case: splat both operands,
    // run the vector widening multiply, and take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // N indexes one of the 2 lanes of `b`, so it must fit in 1 bit.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract the selected lane and defer to the all-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // Direct binding to the dedicated scalar LLVM intrinsic; "unadjusted"
    // preserves the ABI the backend expects for these builtins.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    // SAFETY: pure value computation; the required `neon` feature is enabled above.
    unsafe { _vqdmulls_s32(a, b) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // Narrow `b` with saturation, then concatenate: `a` fills lanes 0..8,
        // the narrowed `b` fills lanes 8..16 of the 128-bit result.
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Narrow `b` with saturation and append it after the existing low half `a`.
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Narrow `b` with saturation and append it after the existing low half `a`.
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        // Narrow `b` with unsigned saturation, then concatenate: `a` fills
        // lanes 0..8, the narrowed `b` fills lanes 8..16.
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Narrow `b` with unsigned saturation and append it after the low half `a`.
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Narrow `b` with unsigned saturation and append it after the low half `a`.
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    // Direct binding to the dedicated scalar LLVM intrinsic; "unadjusted"
    // preserves the ABI the backend expects for these builtins.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    // SAFETY: pure value computation; the required `neon` feature is enabled above.
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    // Direct binding to the dedicated scalar LLVM intrinsic; "unadjusted"
    // preserves the ABI the backend expects for these builtins.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    // SAFETY: pure value computation; the required `neon` feature is enabled above.
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    // No scalar LLVM intrinsic: splat into a vector, narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    // No scalar LLVM intrinsic: splat into a vector, narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    // No scalar LLVM intrinsic: splat into a vector, narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    // No scalar LLVM intrinsic: splat into a vector, narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    unsafe {
        // Narrow signed `b` to unsigned with saturation, then concatenate:
        // `a` fills lanes 0..8, the narrowed `b` fills lanes 8..16.
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Narrow signed `b` to unsigned with saturation, append after low half `a`.
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Narrow signed `b` to unsigned with saturation, append after low half `a`.
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    // No scalar LLVM intrinsic: splat into a vector, narrow, take lane 0.
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    // No scalar LLVM intrinsic: splat into a vector, narrow, take lane 0.
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    // No scalar LLVM intrinsic: splat into a vector, narrow, take lane 0.
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Direct binding to the v1i64 LLVM intrinsic; "unadjusted" preserves
    // the ABI the backend expects for these builtins.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value computation; the required `neon` feature is enabled above.
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Direct binding to the v2i64 LLVM intrinsic; "unadjusted" preserves
    // the ABI the backend expects for these builtins.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value computation; the required `neon` feature is enabled above.
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    // No scalar LLVM intrinsic: splat, negate as a vector, take lane 0.
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    // No scalar LLVM intrinsic: splat, negate as a vector, take lane 0.
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    // No scalar LLVM intrinsic: splat, negate as a vector, take lane 0.
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    // No scalar LLVM intrinsic: splat, negate as a vector, take lane 0.
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE indexes one of the 4 lanes of `c`, so it must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Splat the chosen lane of `c` across all 4 lanes, then reuse the vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE indexes one of the 2 lanes of `c`, so it must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Splat the chosen lane of `c` across both lanes, then reuse the vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // `c` is the 8-lane "q" vector, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into a 4-lane vector (narrowing
        // from the 8-lane source), then use the all-vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // `c` is the 4-lane "q" vector, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into a 2-lane vector, then use
        // the all-vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // `c` is the 4-lane "d" vector, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into an 8-lane vector (widening
        // from the 4-lane source), then use the all-vector form.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // `c` is the 2-lane "d" vector, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into a 4-lane vector, then use
        // the all-vector form.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` to all 8 lanes, then use the
        // all-vector form.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` to all 4 lanes, then use the
        // all-vector form.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Raw binding to the LLVM SQRDMLAH intrinsic for 4x16-bit vectors; the
    // "unadjusted" ABI passes NEON vectors through without Rust ABI changes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Raw binding to the LLVM SQRDMLAH intrinsic for 8x16-bit vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Raw binding to the LLVM SQRDMLAH intrinsic for 2x32-bit vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Raw binding to the LLVM SQRDMLAH intrinsic for 4x32-bit vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` is the 8-lane "q" vector, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` is the 4-lane "q" vector, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: splat each scalar into a 4-lane vector, run the vector
    // intrinsic, and read back lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    // SAFETY: lane 0 always exists in an int16x4_t.
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form: splat each scalar into a 2-lane vector, run the vector
    // intrinsic, and read back lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    // SAFETY: lane 0 always exists in an int32x2_t.
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` to all lanes, then defer to the
        // all-vector form of the intrinsic.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c`, then use the all-vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // `c` is the 8-lane "q" vector, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into a 4-lane vector (narrowing
        // from the 8-lane source), then use the all-vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // `c` is the 4-lane "q" vector, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into a 2-lane vector, then use
        // the all-vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // `c` is the 4-lane "d" vector, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into an 8-lane vector (widening
        // from the 4-lane source), then use the all-vector form.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // `c` is the 2-lane "d" vector, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` into a 4-lane vector, then use
        // the all-vector form.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` to all 8 lanes, then use the
        // all-vector form.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the shuffle indices are statically checked to be in range.
    unsafe {
        // Broadcast the selected lane of `c` to all 4 lanes, then use the
        // all-vector form.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Raw binding to the LLVM SQRDMLSH intrinsic for 4x16-bit vectors; the
    // "unadjusted" ABI passes NEON vectors through without Rust ABI changes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Raw binding to the LLVM SQRDMLSH intrinsic for 8x16-bit vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Raw binding to the LLVM SQRDMLSH intrinsic for 2x32-bit vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Raw binding to the LLVM SQRDMLSH intrinsic for 4x32-bit vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic requires the `rdm` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` is the 8-lane "q" vector, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` is the 4-lane "q" vector, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is statically checked to be a valid lane index for `c`.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: splat each scalar into a 4-lane vector, run the vector
    // intrinsic, and read back lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    // SAFETY: lane 0 always exists in an int16x4_t.
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form: splat each scalar into a 2-lane vector, run the vector
    // intrinsic, and read back lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    // SAFETY: lane 0 always exists in an int32x2_t.
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is statically checked to be a valid lane index for `b`.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // `b` is the 8-lane "q" vector, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is statically checked to be a valid lane index for `b`.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is statically checked to be a valid lane index for `b`.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // `b` is the 4-lane "q" vector, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is statically checked to be a valid lane index for `b`.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both scalars, run the vector op, take lane 0.
    // SAFETY: lane 0 always exists in an int16x4_t.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar form: splat both scalars, run the vector op, take lane 0.
    // SAFETY: lane 0 always exists in an int32x2_t.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat value and shift amount into 8-lane vectors, run the
    // vector SQRSHL, and read back lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane 0 always exists in an int8x8_t.
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat value and shift amount into 4-lane vectors, run the
    // vector SQRSHL, and read back lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane 0 always exists in an int16x4_t.
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Scalar form: unsigned value, signed shift amount (negative shifts
    // right). Splat both, run the vector UQRSHL, and read back lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane 0 always exists in a uint8x8_t.
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Scalar form: unsigned value, signed shift amount (negative shifts
    // right). Splat both, run the vector UQRSHL, and read back lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane 0 always exists in a uint16x4_t.
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // 64-bit scalars map directly onto the scalar (i64) LLVM intrinsic, so
    // no splat/extract round-trip is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the intrinsic requires the `neon` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // 32-bit scalars map directly onto the scalar (i32) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: the intrinsic requires the `neon` feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Unsigned value, signed shift amount; binds directly to the LLVM
    // intrinsic for the 32-bit scalar form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: integer-only FFI call; `neon` is enabled via `#[target_feature]`.
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned value, signed shift amount; binds directly to the LLVM
    // intrinsic for the 64-bit scalar form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: integer-only FFI call; `neon` is enabled via `#[target_feature]`.
    unsafe { _vqrshld_u64(a, b) }
}
18809#[doc = "Signed saturating rounded shift right narrow"]
18810#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
18811#[inline(always)]
18812#[target_feature(enable = "neon")]
18813#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
18814#[rustc_legacy_const_generics(2)]
18815#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18816pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
18817 static_assert!(N >= 1 && N <= 8);
18818 unsafe {
18819 simd_shuffle!(
18820 a,
18821 vqrshrn_n_s16::<N>(b),
18822 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
18823 )
18824 }
18825}
18826#[doc = "Signed saturating rounded shift right narrow"]
18827#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
18828#[inline(always)]
18829#[target_feature(enable = "neon")]
18830#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
18831#[rustc_legacy_const_generics(2)]
18832#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18833pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
18834 static_assert!(N >= 1 && N <= 16);
18835 unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
18836}
18837#[doc = "Signed saturating rounded shift right narrow"]
18838#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
18839#[inline(always)]
18840#[target_feature(enable = "neon")]
18841#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
18842#[rustc_legacy_const_generics(2)]
18843#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18844pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
18845 static_assert!(N >= 1 && N <= 32);
18846 unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
18847}
18848#[doc = "Unsigned saturating rounded shift right narrow"]
18849#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
18850#[inline(always)]
18851#[target_feature(enable = "neon")]
18852#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
18853#[rustc_legacy_const_generics(2)]
18854#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18855pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
18856 static_assert!(N >= 1 && N <= 8);
18857 unsafe {
18858 simd_shuffle!(
18859 a,
18860 vqrshrn_n_u16::<N>(b),
18861 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
18862 )
18863 }
18864}
18865#[doc = "Unsigned saturating rounded shift right narrow"]
18866#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
18867#[inline(always)]
18868#[target_feature(enable = "neon")]
18869#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
18870#[rustc_legacy_const_generics(2)]
18871#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18872pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
18873 static_assert!(N >= 1 && N <= 16);
18874 unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
18875}
18876#[doc = "Unsigned saturating rounded shift right narrow"]
18877#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
18878#[inline(always)]
18879#[target_feature(enable = "neon")]
18880#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
18881#[rustc_legacy_const_generics(2)]
18882#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18883pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
18884 static_assert!(N >= 1 && N <= 32);
18885 unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
18886}
18887#[doc = "Unsigned saturating rounded shift right narrow"]
18888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
18889#[inline(always)]
18890#[target_feature(enable = "neon")]
18891#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
18892#[rustc_legacy_const_generics(1)]
18893#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18894pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
18895 static_assert!(N >= 1 && N <= 32);
18896 let a: uint64x2_t = vdupq_n_u64(a);
18897 unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
18898}
18899#[doc = "Unsigned saturating rounded shift right narrow"]
18900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
18901#[inline(always)]
18902#[target_feature(enable = "neon")]
18903#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
18904#[rustc_legacy_const_generics(1)]
18905#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18906pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
18907 static_assert!(N >= 1 && N <= 8);
18908 let a: uint16x8_t = vdupq_n_u16(a);
18909 unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
18910}
18911#[doc = "Unsigned saturating rounded shift right narrow"]
18912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
18913#[inline(always)]
18914#[target_feature(enable = "neon")]
18915#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
18916#[rustc_legacy_const_generics(1)]
18917#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18918pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
18919 static_assert!(N >= 1 && N <= 16);
18920 let a: uint32x4_t = vdupq_n_u32(a);
18921 unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
18922}
18923#[doc = "Signed saturating rounded shift right narrow"]
18924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
18925#[inline(always)]
18926#[target_feature(enable = "neon")]
18927#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
18928#[rustc_legacy_const_generics(1)]
18929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18930pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
18931 static_assert!(N >= 1 && N <= 8);
18932 let a: int16x8_t = vdupq_n_s16(a);
18933 unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
18934}
18935#[doc = "Signed saturating rounded shift right narrow"]
18936#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
18937#[inline(always)]
18938#[target_feature(enable = "neon")]
18939#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
18940#[rustc_legacy_const_generics(1)]
18941#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18942pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
18943 static_assert!(N >= 1 && N <= 16);
18944 let a: int32x4_t = vdupq_n_s32(a);
18945 unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
18946}
18947#[doc = "Signed saturating rounded shift right narrow"]
18948#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
18949#[inline(always)]
18950#[target_feature(enable = "neon")]
18951#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
18952#[rustc_legacy_const_generics(1)]
18953#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18954pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
18955 static_assert!(N >= 1 && N <= 32);
18956 let a: int64x2_t = vdupq_n_s64(a);
18957 unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
18958}
18959#[doc = "Signed saturating rounded shift right unsigned narrow"]
18960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
18961#[inline(always)]
18962#[target_feature(enable = "neon")]
18963#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
18964#[rustc_legacy_const_generics(2)]
18965#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18966pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
18967 static_assert!(N >= 1 && N <= 8);
18968 unsafe {
18969 simd_shuffle!(
18970 a,
18971 vqrshrun_n_s16::<N>(b),
18972 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
18973 )
18974 }
18975}
18976#[doc = "Signed saturating rounded shift right unsigned narrow"]
18977#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
18978#[inline(always)]
18979#[target_feature(enable = "neon")]
18980#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
18981#[rustc_legacy_const_generics(2)]
18982#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18983pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
18984 static_assert!(N >= 1 && N <= 16);
18985 unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
18986}
18987#[doc = "Signed saturating rounded shift right unsigned narrow"]
18988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
18989#[inline(always)]
18990#[target_feature(enable = "neon")]
18991#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
18992#[rustc_legacy_const_generics(2)]
18993#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18994pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
18995 static_assert!(N >= 1 && N <= 32);
18996 unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
18997}
18998#[doc = "Signed saturating rounded shift right unsigned narrow"]
18999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
19000#[inline(always)]
19001#[target_feature(enable = "neon")]
19002#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
19003#[rustc_legacy_const_generics(1)]
19004#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19005pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
19006 static_assert!(N >= 1 && N <= 32);
19007 let a: int64x2_t = vdupq_n_s64(a);
19008 unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
19009}
19010#[doc = "Signed saturating rounded shift right unsigned narrow"]
19011#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
19012#[inline(always)]
19013#[target_feature(enable = "neon")]
19014#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
19015#[rustc_legacy_const_generics(1)]
19016#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19017pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
19018 static_assert!(N >= 1 && N <= 8);
19019 let a: int16x8_t = vdupq_n_s16(a);
19020 unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
19021}
19022#[doc = "Signed saturating rounded shift right unsigned narrow"]
19023#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
19024#[inline(always)]
19025#[target_feature(enable = "neon")]
19026#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
19027#[rustc_legacy_const_generics(1)]
19028#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19029pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
19030 static_assert!(N >= 1 && N <= 16);
19031 let a: int32x4_t = vdupq_n_s32(a);
19032 unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
19033}
19034#[doc = "Signed saturating shift left"]
19035#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
19036#[inline(always)]
19037#[target_feature(enable = "neon")]
19038#[cfg_attr(test, assert_instr(sqshl, N = 2))]
19039#[rustc_legacy_const_generics(1)]
19040#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19041pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
19042 static_assert_uimm_bits!(N, 3);
19043 unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
19044}
19045#[doc = "Signed saturating shift left"]
19046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
19047#[inline(always)]
19048#[target_feature(enable = "neon")]
19049#[cfg_attr(test, assert_instr(sqshl, N = 2))]
19050#[rustc_legacy_const_generics(1)]
19051#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19052pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
19053 static_assert_uimm_bits!(N, 6);
19054 unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
19055}
19056#[doc = "Signed saturating shift left"]
19057#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
19058#[inline(always)]
19059#[target_feature(enable = "neon")]
19060#[cfg_attr(test, assert_instr(sqshl, N = 2))]
19061#[rustc_legacy_const_generics(1)]
19062#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19063pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
19064 static_assert_uimm_bits!(N, 4);
19065 unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
19066}
19067#[doc = "Signed saturating shift left"]
19068#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
19069#[inline(always)]
19070#[target_feature(enable = "neon")]
19071#[cfg_attr(test, assert_instr(sqshl, N = 2))]
19072#[rustc_legacy_const_generics(1)]
19073#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19074pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
19075 static_assert_uimm_bits!(N, 5);
19076 unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
19077}
19078#[doc = "Unsigned saturating shift left"]
19079#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
19080#[inline(always)]
19081#[target_feature(enable = "neon")]
19082#[cfg_attr(test, assert_instr(uqshl, N = 2))]
19083#[rustc_legacy_const_generics(1)]
19084#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19085pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
19086 static_assert_uimm_bits!(N, 3);
19087 unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
19088}
19089#[doc = "Unsigned saturating shift left"]
19090#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
19091#[inline(always)]
19092#[target_feature(enable = "neon")]
19093#[cfg_attr(test, assert_instr(uqshl, N = 2))]
19094#[rustc_legacy_const_generics(1)]
19095#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19096pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
19097 static_assert_uimm_bits!(N, 6);
19098 unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
19099}
19100#[doc = "Unsigned saturating shift left"]
19101#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
19102#[inline(always)]
19103#[target_feature(enable = "neon")]
19104#[cfg_attr(test, assert_instr(uqshl, N = 2))]
19105#[rustc_legacy_const_generics(1)]
19106#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19107pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
19108 static_assert_uimm_bits!(N, 4);
19109 unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
19110}
19111#[doc = "Unsigned saturating shift left"]
19112#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
19113#[inline(always)]
19114#[target_feature(enable = "neon")]
19115#[cfg_attr(test, assert_instr(uqshl, N = 2))]
19116#[rustc_legacy_const_generics(1)]
19117#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19118pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
19119 static_assert_uimm_bits!(N, 5);
19120 unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
19121}
19122#[doc = "Signed saturating shift left"]
19123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
19124#[inline(always)]
19125#[target_feature(enable = "neon")]
19126#[cfg_attr(test, assert_instr(sqshl))]
19127#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19128pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
19129 let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
19130 unsafe { simd_extract!(c, 0) }
19131}
19132#[doc = "Signed saturating shift left"]
19133#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
19134#[inline(always)]
19135#[target_feature(enable = "neon")]
19136#[cfg_attr(test, assert_instr(sqshl))]
19137#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19138pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
19139 let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
19140 unsafe { simd_extract!(c, 0) }
19141}
19142#[doc = "Signed saturating shift left"]
19143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
19144#[inline(always)]
19145#[target_feature(enable = "neon")]
19146#[cfg_attr(test, assert_instr(sqshl))]
19147#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19148pub fn vqshls_s32(a: i32, b: i32) -> i32 {
19149 let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
19150 unsafe { simd_extract!(c, 0) }
19151}
19152#[doc = "Unsigned saturating shift left"]
19153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
19154#[inline(always)]
19155#[target_feature(enable = "neon")]
19156#[cfg_attr(test, assert_instr(uqshl))]
19157#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19158pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
19159 let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
19160 unsafe { simd_extract!(c, 0) }
19161}
19162#[doc = "Unsigned saturating shift left"]
19163#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
19164#[inline(always)]
19165#[target_feature(enable = "neon")]
19166#[cfg_attr(test, assert_instr(uqshl))]
19167#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19168pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
19169 let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
19170 unsafe { simd_extract!(c, 0) }
19171}
19172#[doc = "Unsigned saturating shift left"]
19173#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
19174#[inline(always)]
19175#[target_feature(enable = "neon")]
19176#[cfg_attr(test, assert_instr(uqshl))]
19177#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19178pub fn vqshls_u32(a: u32, b: i32) -> u32 {
19179 let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
19180 unsafe { simd_extract!(c, 0) }
19181}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // The 64-bit scalar form binds directly to the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: integer-only FFI call; `neon` is enabled via `#[target_feature]`.
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned value, signed shift amount; binds directly to the LLVM
    // intrinsic for the 64-bit scalar form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: integer-only FFI call; `neon` is enabled via `#[target_feature]`.
    unsafe { _vqshld_u64(a, b) }
}
19214#[doc = "Signed saturating shift left unsigned"]
19215#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
19216#[inline(always)]
19217#[target_feature(enable = "neon")]
19218#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
19219#[rustc_legacy_const_generics(1)]
19220#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19221pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
19222 static_assert_uimm_bits!(N, 3);
19223 unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
19224}
19225#[doc = "Signed saturating shift left unsigned"]
19226#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
19227#[inline(always)]
19228#[target_feature(enable = "neon")]
19229#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
19230#[rustc_legacy_const_generics(1)]
19231#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19232pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
19233 static_assert_uimm_bits!(N, 6);
19234 unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
19235}
19236#[doc = "Signed saturating shift left unsigned"]
19237#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
19238#[inline(always)]
19239#[target_feature(enable = "neon")]
19240#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
19241#[rustc_legacy_const_generics(1)]
19242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19243pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
19244 static_assert_uimm_bits!(N, 4);
19245 unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
19246}
19247#[doc = "Signed saturating shift left unsigned"]
19248#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
19249#[inline(always)]
19250#[target_feature(enable = "neon")]
19251#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
19252#[rustc_legacy_const_generics(1)]
19253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19254pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
19255 static_assert_uimm_bits!(N, 5);
19256 unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
19257}
19258#[doc = "Signed saturating shift right narrow"]
19259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
19260#[inline(always)]
19261#[target_feature(enable = "neon")]
19262#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
19263#[rustc_legacy_const_generics(2)]
19264#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19265pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
19266 static_assert!(N >= 1 && N <= 8);
19267 unsafe {
19268 simd_shuffle!(
19269 a,
19270 vqshrn_n_s16::<N>(b),
19271 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
19272 )
19273 }
19274}
19275#[doc = "Signed saturating shift right narrow"]
19276#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
19277#[inline(always)]
19278#[target_feature(enable = "neon")]
19279#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
19280#[rustc_legacy_const_generics(2)]
19281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19282pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
19283 static_assert!(N >= 1 && N <= 16);
19284 unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
19285}
19286#[doc = "Signed saturating shift right narrow"]
19287#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
19288#[inline(always)]
19289#[target_feature(enable = "neon")]
19290#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
19291#[rustc_legacy_const_generics(2)]
19292#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19293pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
19294 static_assert!(N >= 1 && N <= 32);
19295 unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
19296}
19297#[doc = "Unsigned saturating shift right narrow"]
19298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
19299#[inline(always)]
19300#[target_feature(enable = "neon")]
19301#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
19302#[rustc_legacy_const_generics(2)]
19303#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19304pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
19305 static_assert!(N >= 1 && N <= 8);
19306 unsafe {
19307 simd_shuffle!(
19308 a,
19309 vqshrn_n_u16::<N>(b),
19310 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
19311 )
19312 }
19313}
19314#[doc = "Unsigned saturating shift right narrow"]
19315#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
19316#[inline(always)]
19317#[target_feature(enable = "neon")]
19318#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
19319#[rustc_legacy_const_generics(2)]
19320#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19321pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
19322 static_assert!(N >= 1 && N <= 16);
19323 unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
19324}
19325#[doc = "Unsigned saturating shift right narrow"]
19326#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
19327#[inline(always)]
19328#[target_feature(enable = "neon")]
19329#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
19330#[rustc_legacy_const_generics(2)]
19331#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19332pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
19333 static_assert!(N >= 1 && N <= 32);
19334 unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
19335}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    // The LLVM intrinsic takes the shift amount as a runtime argument, so
    // the const generic `N` is forwarded as a plain value below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    // SAFETY: integer-only FFI call; `N` is range-checked above and `neon`
    // is enabled via `#[target_feature]`.
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    // The LLVM intrinsic takes the shift amount as a runtime argument, so
    // the const generic `N` is forwarded as a plain value below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    // SAFETY: integer-only FFI call; `N` is range-checked above and `neon`
    // is enabled via `#[target_feature]`.
    unsafe { _vqshrnd_n_u64(a, N) }
}
19372#[doc = "Signed saturating shift right narrow"]
19373#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
19374#[inline(always)]
19375#[target_feature(enable = "neon")]
19376#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
19377#[rustc_legacy_const_generics(1)]
19378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19379pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
19380 static_assert!(N >= 1 && N <= 8);
19381 unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
19382}
19383#[doc = "Signed saturating shift right narrow"]
19384#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
19385#[inline(always)]
19386#[target_feature(enable = "neon")]
19387#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
19388#[rustc_legacy_const_generics(1)]
19389#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19390pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
19391 static_assert!(N >= 1 && N <= 16);
19392 unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
19393}
19394#[doc = "Unsigned saturating shift right narrow"]
19395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
19396#[inline(always)]
19397#[target_feature(enable = "neon")]
19398#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
19399#[rustc_legacy_const_generics(1)]
19400#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19401pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
19402 static_assert!(N >= 1 && N <= 8);
19403 unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
19404}
19405#[doc = "Unsigned saturating shift right narrow"]
19406#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
19407#[inline(always)]
19408#[target_feature(enable = "neon")]
19409#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
19410#[rustc_legacy_const_generics(1)]
19411#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19412pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
19413 static_assert!(N >= 1 && N <= 16);
19414 unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
19415}
19416#[doc = "Signed saturating shift right unsigned narrow"]
19417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
19418#[inline(always)]
19419#[target_feature(enable = "neon")]
19420#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
19421#[rustc_legacy_const_generics(2)]
19422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19423pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
19424 static_assert!(N >= 1 && N <= 8);
19425 unsafe {
19426 simd_shuffle!(
19427 a,
19428 vqshrun_n_s16::<N>(b),
19429 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
19430 )
19431 }
19432}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount is limited to the width of the narrowed element (16 bits).
    static_assert!(N >= 1 && N <= 16);
    // Low 4 lanes come from `a`, high 4 lanes from the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount is limited to the width of the narrowed element (32 bits).
    static_assert!(N >= 1 && N <= 32);
    // Low 2 lanes come from `a`, high 2 lanes from the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount is limited to the width of the narrowed element (32 bits).
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat, vector narrowing shift, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount is limited to the width of the narrowed element (8 bits).
    static_assert!(N >= 1 && N <= 8);
    // Scalar form: splat, vector narrowing shift, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount is limited to the width of the narrowed element (16 bits).
    static_assert!(N >= 1 && N <= 16);
    // Scalar form: splat, vector narrowing shift, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Scalar 8-bit form is built from the vector op: splat both operands,
    // do the vector saturating subtract, and extract lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Scalar 16-bit form via the vector op: splat, subtract, extract lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Scalar 8-bit form via the vector op: splat, subtract, extract lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Scalar 16-bit form via the vector op: splat, subtract, extract lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // Binds the scalar (i32) LLVM intrinsic directly — no splat/extract
    // round-trip is needed at this width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    // Binds the scalar (i64) LLVM intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    // Binds the scalar (i32) unsigned LLVM intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    // Binds the scalar (i64) unsigned LLVM intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Private helper: binds the raw LLVM `tbl1` intrinsic (8-byte result).
    // The public u8/p8 wrappers transmute their table into this signed form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Private helper: binds the raw LLVM `tbl1` intrinsic (16-byte result).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Signed element type matches the helper directly; no conversion needed.
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Signed element type matches the helper directly; no conversion needed.
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    // Table bytes are type-punned through the signed helper; the index
    // vector `b` is already the type the helper expects.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Type-pun the unsigned table through the signed helper.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    // Type-pun the polynomial table through the signed helper.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // Type-pun the polynomial table through the signed helper.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private helper: binds the two-register LLVM `tbl2` intrinsic
    // (8-byte result); `a`/`b` are the table halves, `c` the indices.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private helper: binds the two-register LLVM `tbl2` intrinsic
    // (16-byte result).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the 2-register table tuple for the raw helper.
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the 2-register table tuple for the raw helper.
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // Little-endian: lane order already matches the intrinsic, so only a
    // type-pun of the table registers is needed.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lane order is reversed relative to the intrinsic,
    // so reverse each table register and the index vector, do the lookup,
    // then reverse the result back.
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // Little-endian: plain type-pun through the signed helper.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse the table registers and index vector,
    // perform the lookup, then reverse the result back.
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // Little-endian: plain type-pun through the signed helper.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse inputs, look up, reverse the result back.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // Little-endian: plain type-pun through the signed helper.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse inputs, look up, reverse the result back.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private helper: binds the three-register LLVM `tbl3` intrinsic
    // (8-byte result); `a`/`b`/`c` are the table registers, `d` the indices.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private helper: binds the three-register LLVM `tbl3` intrinsic
    // (16-byte result).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the 3-register table tuple for the raw helper.
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the 3-register table tuple for the raw helper.
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Little-endian: plain type-pun through the signed helper.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse all three table registers and the index
    // vector, perform the lookup, then reverse the result back.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // Little-endian: plain type-pun through the signed helper.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse inputs, look up, reverse the result back.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // Little-endian: plain type-pun through the signed helper.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse inputs, look up, reverse the result back.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // Little-endian: plain type-pun through the signed helper.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse inputs, look up, reverse the result back.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private helper: binds the four-register LLVM `tbl4` intrinsic
    // (8-byte result); `a`..`d` are the table registers, `e` the indices.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private helper: binds the four-register LLVM `tbl4` intrinsic
    // (16-byte result).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the 4-register table tuple for the raw helper.
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the 4-register table tuple for the raw helper.
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Little-endian: plain type-pun of all four table registers through
    // the signed helper.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse all four table registers and the index
    // vector, perform the lookup, then reverse the result back.
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
20275#[doc = "Table look-up"]
20276#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
20277#[inline(always)]
20278#[cfg(target_endian = "little")]
20279#[target_feature(enable = "neon")]
20280#[cfg_attr(test, assert_instr(tbl))]
20281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20282pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
20283 unsafe {
20284 transmute(vqtbl4q(
20285 transmute(a.0),
20286 transmute(a.1),
20287 transmute(a.2),
20288 transmute(a.3),
20289 b,
20290 ))
20291 }
20292}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
20347#[doc = "Table look-up"]
20348#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
20349#[inline(always)]
20350#[cfg(target_endian = "little")]
20351#[target_feature(enable = "neon")]
20352#[cfg_attr(test, assert_instr(tbl))]
20353#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20354pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
20355 unsafe {
20356 transmute(vqtbl4(
20357 transmute(a.0),
20358 transmute(a.1),
20359 transmute(a.2),
20360 transmute(a.3),
20361 b,
20362 ))
20363 }
20364}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
20414#[doc = "Table look-up"]
20415#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
20416#[inline(always)]
20417#[cfg(target_endian = "little")]
20418#[target_feature(enable = "neon")]
20419#[cfg_attr(test, assert_instr(tbl))]
20420#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20421pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
20422 unsafe {
20423 transmute(vqtbl4q(
20424 transmute(a.0),
20425 transmute(a.1),
20426 transmute(a.2),
20427 transmute(a.3),
20428 b,
20429 ))
20430 }
20431}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private binding to the LLVM TBX1 intrinsic; the symbol is selected via
    // `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private binding to the LLVM TBX1 intrinsic (16-lane form); the symbol
    // is selected via `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx1q(a, b, c) }
}
20518#[doc = "Extended table look-up"]
20519#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
20520#[inline(always)]
20521#[target_feature(enable = "neon")]
20522#[cfg_attr(test, assert_instr(tbx))]
20523#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20524pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
20525 vqtbx1(a, b, c)
20526}
20527#[doc = "Extended table look-up"]
20528#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
20529#[inline(always)]
20530#[target_feature(enable = "neon")]
20531#[cfg_attr(test, assert_instr(tbx))]
20532#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20533pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
20534 vqtbx1q(a, b, c)
20535}
20536#[doc = "Extended table look-up"]
20537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
20538#[inline(always)]
20539#[target_feature(enable = "neon")]
20540#[cfg_attr(test, assert_instr(tbx))]
20541#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20542pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
20543 unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
20544}
20545#[doc = "Extended table look-up"]
20546#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
20547#[inline(always)]
20548#[target_feature(enable = "neon")]
20549#[cfg_attr(test, assert_instr(tbx))]
20550#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20551pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
20552 unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
20553}
20554#[doc = "Extended table look-up"]
20555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
20556#[inline(always)]
20557#[target_feature(enable = "neon")]
20558#[cfg_attr(test, assert_instr(tbx))]
20559#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20560pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
20561 unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
20562}
20563#[doc = "Extended table look-up"]
20564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
20565#[inline(always)]
20566#[target_feature(enable = "neon")]
20567#[cfg_attr(test, assert_instr(tbx))]
20568#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20569pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
20570 unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
20571}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding to the LLVM TBX2 intrinsic (two table registers); the
    // symbol is selected via `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private binding to the LLVM TBX2 intrinsic (16-lane form); the symbol
    // is selected via `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx2q(a, b, c, d) }
}
20604#[doc = "Extended table look-up"]
20605#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
20606#[inline(always)]
20607#[target_feature(enable = "neon")]
20608#[cfg_attr(test, assert_instr(tbx))]
20609#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20610pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
20611 vqtbx2(a, b.0, b.1, c)
20612}
20613#[doc = "Extended table look-up"]
20614#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
20615#[inline(always)]
20616#[target_feature(enable = "neon")]
20617#[cfg_attr(test, assert_instr(tbx))]
20618#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20619pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
20620 vqtbx2q(a, b.0, b.1, c)
20621}
20622#[doc = "Extended table look-up"]
20623#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
20624#[inline(always)]
20625#[cfg(target_endian = "little")]
20626#[target_feature(enable = "neon")]
20627#[cfg_attr(test, assert_instr(tbx))]
20628#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20629pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
20630 unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
20631}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: uint8x16x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
20662#[doc = "Extended table look-up"]
20663#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
20664#[inline(always)]
20665#[cfg(target_endian = "little")]
20666#[target_feature(enable = "neon")]
20667#[cfg_attr(test, assert_instr(tbx))]
20668#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20669pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
20670 unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
20671}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: uint8x16x2_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
20709#[doc = "Extended table look-up"]
20710#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
20711#[inline(always)]
20712#[cfg(target_endian = "little")]
20713#[target_feature(enable = "neon")]
20714#[cfg_attr(test, assert_instr(tbx))]
20715#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20716pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
20717 unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
20718}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: poly8x16x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
20749#[doc = "Extended table look-up"]
20750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
20751#[inline(always)]
20752#[cfg(target_endian = "little")]
20753#[target_feature(enable = "neon")]
20754#[cfg_attr(test, assert_instr(tbx))]
20755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20756pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
20757 unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
20758}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: poly8x16x2_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private binding to the LLVM TBX3 intrinsic (three table registers);
    // the symbol is selected via `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private binding to the LLVM TBX3 intrinsic (16-lane form); the symbol
    // is selected via `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
20835#[doc = "Extended table look-up"]
20836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
20837#[inline(always)]
20838#[target_feature(enable = "neon")]
20839#[cfg_attr(test, assert_instr(tbx))]
20840#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20841pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
20842 vqtbx3(a, b.0, b.1, b.2, c)
20843}
20844#[doc = "Extended table look-up"]
20845#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
20846#[inline(always)]
20847#[target_feature(enable = "neon")]
20848#[cfg_attr(test, assert_instr(tbx))]
20849#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20850pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
20851 vqtbx3q(a, b.0, b.1, b.2, c)
20852}
20853#[doc = "Extended table look-up"]
20854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
20855#[inline(always)]
20856#[cfg(target_endian = "little")]
20857#[target_feature(enable = "neon")]
20858#[cfg_attr(test, assert_instr(tbx))]
20859#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20860pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
20861 unsafe {
20862 transmute(vqtbx3(
20863 transmute(a),
20864 transmute(b.0),
20865 transmute(b.1),
20866 transmute(b.2),
20867 c,
20868 ))
20869 }
20870}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: uint8x16x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
20914#[doc = "Extended table look-up"]
20915#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
20916#[inline(always)]
20917#[cfg(target_endian = "little")]
20918#[target_feature(enable = "neon")]
20919#[cfg_attr(test, assert_instr(tbx))]
20920#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20921pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
20922 unsafe {
20923 transmute(vqtbx3q(
20924 transmute(a),
20925 transmute(b.0),
20926 transmute(b.1),
20927 transmute(b.2),
20928 c,
20929 ))
20930 }
20931}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: uint8x16x3_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
20981#[doc = "Extended table look-up"]
20982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
20983#[inline(always)]
20984#[cfg(target_endian = "little")]
20985#[target_feature(enable = "neon")]
20986#[cfg_attr(test, assert_instr(tbx))]
20987#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20988pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
20989 unsafe {
20990 transmute(vqtbx3(
20991 transmute(a),
20992 transmute(b.0),
20993 transmute(b.1),
20994 transmute(b.2),
20995 c,
20996 ))
20997 }
20998}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: poly8x16x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
21042#[doc = "Extended table look-up"]
21043#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
21044#[inline(always)]
21045#[cfg(target_endian = "little")]
21046#[target_feature(enable = "neon")]
21047#[cfg_attr(test, assert_instr(tbx))]
21048#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21049pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
21050 unsafe {
21051 transmute(vqtbx3q(
21052 transmute(a),
21053 transmute(b.0),
21054 transmute(b.1),
21055 transmute(b.2),
21056 c,
21057 ))
21058 }
21059}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse the lane order of every input vector so the
    // underlying lookup sees the same element layout as on little-endian,
    // then reverse the result back (mirrors the little-endian definition).
    let mut b: poly8x16x3_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // Look up on the lane-reversed inputs, then restore the lane order.
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    // Private binding to the LLVM TBX4 intrinsic (four table registers);
    // the symbol is selected via `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    // Private binding to the LLVM TBX4 intrinsic (16-lane form); the symbol
    // is selected via `link_name` on AArch64/arm64ec targets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: the declaration above matches this function's signature, and
    // `target_feature(enable = "neon")` guarantees the instruction is usable.
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
21169#[doc = "Extended table look-up"]
21170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
21171#[inline(always)]
21172#[target_feature(enable = "neon")]
21173#[cfg_attr(test, assert_instr(tbx))]
21174#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21175pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
21176 vqtbx4(a, b.0, b.1, b.2, b.3, c)
21177}
21178#[doc = "Extended table look-up"]
21179#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
21180#[inline(always)]
21181#[target_feature(enable = "neon")]
21182#[cfg_attr(test, assert_instr(tbx))]
21183#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21184pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
21185 vqtbx4q(a, b.0, b.1, b.2, b.3, c)
21186}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Unsigned variant: byte-for-byte the same operation as the signed TBX, so the
// arguments are simply transmuted to the signed types the helper expects.
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian variant: every input vector is lane-reversed to the little-endian
// lane order the underlying intrinsic assumes, and the result is reversed back
// before being returned.
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        // Restore the caller's big-endian lane order.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Unsigned 128-bit variant: identical byte operation to the signed TBX, so the
// arguments are transmuted to the signed types the helper expects.
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian variant: lane-reverse all inputs to little-endian lane order,
// run the lookup, then lane-reverse the result back.
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        // Restore the caller's big-endian lane order.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Polynomial variant: same byte operation as the signed TBX, so the arguments
// are transmuted to the signed types the helper expects.
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian variant: lane-reverse all inputs to little-endian lane order,
// run the lookup, then lane-reverse the result back.
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        // Restore the caller's big-endian lane order.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Polynomial 128-bit variant: same byte operation as the signed TBX, so the
// arguments are transmuted to the signed types the helper expects.
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian variant: lane-reverse all inputs to little-endian lane order,
// run the lookup, then lane-reverse the result back.
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        // Restore the caller's big-endian lane order.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
// Thin FFI shim over the SHA3 RAX1 instruction; no endianness fixup is needed
// because the operation is defined on whole 64-bit lanes.
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
// Reverses the bits within each byte lane via the generic SIMD bit-reverse,
// which lowers to RBIT on AArch64.
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
// 128-bit counterpart of `vrbit_s8`.
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    unsafe { simd_bitreverse(a) }
}
21513#[doc = "Reverse bit order"]
21514#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
21515#[inline(always)]
21516#[cfg(target_endian = "little")]
21517#[target_feature(enable = "neon")]
21518#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21519#[cfg_attr(test, assert_instr(rbit))]
21520pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
21521 unsafe { transmute(vrbit_s8(transmute(a))) }
21522}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
// Big-endian variant: lane-reverse the input and output around the signed
// implementation so the caller-visible lane order is preserved.
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
21537#[doc = "Reverse bit order"]
21538#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
21539#[inline(always)]
21540#[cfg(target_endian = "little")]
21541#[target_feature(enable = "neon")]
21542#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21543#[cfg_attr(test, assert_instr(rbit))]
21544pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
21545 unsafe { transmute(vrbitq_s8(transmute(a))) }
21546}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
// Big-endian variant: lane-reverse the input and output around the signed
// implementation so the caller-visible lane order is preserved.
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
21566#[doc = "Reverse bit order"]
21567#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
21568#[inline(always)]
21569#[cfg(target_endian = "little")]
21570#[target_feature(enable = "neon")]
21571#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21572#[cfg_attr(test, assert_instr(rbit))]
21573pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
21574 unsafe { transmute(vrbit_s8(transmute(a))) }
21575}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
// Big-endian variant: lane-reverse the input and output around the signed
// implementation so the caller-visible lane order is preserved.
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
21590#[doc = "Reverse bit order"]
21591#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
21592#[inline(always)]
21593#[cfg(target_endian = "little")]
21594#[target_feature(enable = "neon")]
21595#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21596#[cfg_attr(test, assert_instr(rbit))]
21597pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
21598 unsafe { transmute(vrbitq_s8(transmute(a))) }
21599}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
// Big-endian variant: lane-reverse the input and output around the signed
// implementation so the caller-visible lane order is preserved.
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Thin FFI shim over FRECPE (per-lane reciprocal estimate), 1 x f64.
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// FRECPE shim, 2 x f64.
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar FRECPE shim, f64.
pub fn vrecped_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar FRECPE shim, f32.
pub fn vrecpes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar FRECPE shim, f16 (requires fp16; excluded on arm64ec).
pub fn vrecpeh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Thin FFI shim over FRECPS (Newton-Raphson reciprocal step), 1 x f64.
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// FRECPS shim, 2 x f64.
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar FRECPS shim, f64.
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar FRECPS shim, f32.
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar FRECPS shim, f16 (requires fp16; excluded on arm64ec).
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Thin FFI shim over FRECPX (reciprocal exponent), f64.
pub fn vrecpxd_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// FRECPX shim, f32.
pub fn vrecpxs_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// FRECPX shim, f16 (requires fp16; excluded on arm64ec).
pub fn vrecpxh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Same-size bit cast (4 x f16 -> 1 x f64, both 64 bits); a no-op at runtime.
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Big-endian variant: the f16 lanes are reversed before the cast; the f64
// result is single-lane, so no output shuffle is needed.
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Same-size bit cast (8 x f16 -> 2 x f64, both 128 bits); a no-op at runtime.
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Big-endian variant: lane-reverse the input lanes, bit-cast, then
// lane-reverse the output so both sides see their native lane order.
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Same-size bit cast (1 x f64 -> 4 x f16, both 64 bits); a no-op at runtime.
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Big-endian variant: the f64 input is single-lane (no input shuffle needed);
// only the multi-lane f16 result must be lane-reversed.
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Same-size bit cast (2 x f64 -> 8 x f16, both 128 bits); a no-op at runtime.
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
// Big-endian variant: lane-reverse the input lanes, bit-cast, then
// lane-reverse the output so both sides see their native lane order.
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
// Same-size bit cast (128-bit poly scalar -> 2 x f64); a no-op at runtime.
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
// Big-endian variant: the p128 input is a scalar (no input shuffle needed);
// only the two-lane f64 result must be lane-reversed.
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
// Same-size bit cast (2 x f32 -> 1 x f64, both 64 bits); a no-op at runtime.
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
// Big-endian variant: only the two-lane input needs reversing; the f64
// result is single-lane.
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid and the
    // single-lane result needs no reordering.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-cast is valid. Both
    // types have a single lane, so no endian-specific shuffle is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-cast is valid. Both
    // types have a single lane, so no endian-specific shuffle is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // SAFETY: same-size plain-data vectors, so the bit-cast is valid. The
    // single-lane input needs no reordering; on big-endian the multi-lane
    // result has its lane order reversed after the cast.
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-cast is valid. Both
    // types have a single lane, so no endian-specific shuffle is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // SAFETY: both types are 128 bits of plain data; the bit-level
    // reinterpret is always valid, and little-endian needs no lane shuffle.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: both types are 128 bits of plain data; the bit-cast is valid
    // and the scalar result needs no further reordering.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: poly8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: poly16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid and the
    // single-lane result needs no reordering.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid. The result
    // lanes are then reversed back into the expected big-endian order.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Big-endian: reverse the input lane order before reinterpreting.
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: same-size plain-data vectors; the bit-cast is valid and the
    // single-lane result needs no reordering.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // SAFETY: same-size plain-data NEON vectors; the bit-level reinterpret is
    // always valid, and little-endian lane order needs no correction.
    unsafe { transmute(a) }
}
22675#[doc = "Vector reinterpret cast operation"]
22676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
22677#[inline(always)]
22678#[cfg(target_endian = "big")]
22679#[target_feature(enable = "neon")]
22680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22681#[cfg_attr(test, assert_instr(nop))]
22682pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
22683 let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
22684 unsafe {
22685 let ret_val: float64x2_t = transmute(a);
22686 simd_shuffle!(ret_val, ret_val, [1, 0])
22687 }
22688}
22689#[doc = "Vector reinterpret cast operation"]
22690#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
22691#[inline(always)]
22692#[cfg(target_endian = "little")]
22693#[target_feature(enable = "neon")]
22694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22695#[cfg_attr(test, assert_instr(nop))]
22696pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
22697 unsafe { transmute(a) }
22698}
22699#[doc = "Vector reinterpret cast operation"]
22700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
22701#[inline(always)]
22702#[cfg(target_endian = "big")]
22703#[target_feature(enable = "neon")]
22704#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22705#[cfg_attr(test, assert_instr(nop))]
22706pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
22707 let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22708 unsafe { transmute(a) }
22709}
22710#[doc = "Vector reinterpret cast operation"]
22711#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
22712#[inline(always)]
22713#[cfg(target_endian = "little")]
22714#[target_feature(enable = "neon")]
22715#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22716#[cfg_attr(test, assert_instr(nop))]
22717pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
22718 unsafe { transmute(a) }
22719}
22720#[doc = "Vector reinterpret cast operation"]
22721#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
22722#[inline(always)]
22723#[cfg(target_endian = "big")]
22724#[target_feature(enable = "neon")]
22725#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22726#[cfg_attr(test, assert_instr(nop))]
22727pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
22728 let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
22729 unsafe {
22730 let ret_val: float64x2_t = transmute(a);
22731 simd_shuffle!(ret_val, ret_val, [1, 0])
22732 }
22733}
22734#[doc = "Vector reinterpret cast operation"]
22735#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
22736#[inline(always)]
22737#[target_feature(enable = "neon")]
22738#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22739#[cfg_attr(test, assert_instr(nop))]
22740pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
22741 unsafe { transmute(a) }
22742}
22743#[doc = "Vector reinterpret cast operation"]
22744#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
22745#[inline(always)]
22746#[target_feature(enable = "neon")]
22747#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22748#[cfg_attr(test, assert_instr(nop))]
22749pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
22750 unsafe { transmute(a) }
22751}
22752#[doc = "Vector reinterpret cast operation"]
22753#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
22754#[inline(always)]
22755#[cfg(target_endian = "little")]
22756#[target_feature(enable = "neon")]
22757#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22758#[cfg_attr(test, assert_instr(nop))]
22759pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
22760 unsafe { transmute(a) }
22761}
22762#[doc = "Vector reinterpret cast operation"]
22763#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
22764#[inline(always)]
22765#[cfg(target_endian = "big")]
22766#[target_feature(enable = "neon")]
22767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22768#[cfg_attr(test, assert_instr(nop))]
22769pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
22770 let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22771 unsafe {
22772 let ret_val: float64x2_t = transmute(a);
22773 simd_shuffle!(ret_val, ret_val, [1, 0])
22774 }
22775}
22776#[doc = "Vector reinterpret cast operation"]
22777#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
22778#[inline(always)]
22779#[cfg(target_endian = "little")]
22780#[target_feature(enable = "neon")]
22781#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22782#[cfg_attr(test, assert_instr(nop))]
22783pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
22784 unsafe { transmute(a) }
22785}
22786#[doc = "Vector reinterpret cast operation"]
22787#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
22788#[inline(always)]
22789#[cfg(target_endian = "big")]
22790#[target_feature(enable = "neon")]
22791#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22792#[cfg_attr(test, assert_instr(nop))]
22793pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
22794 let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22795 unsafe {
22796 let ret_val: poly64x2_t = transmute(a);
22797 simd_shuffle!(ret_val, ret_val, [1, 0])
22798 }
22799}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Big-endian: reverse the 8 input lanes; single-lane output needs no reorder.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Big-endian: reverse the 4 input lanes; single-lane output needs no reorder.
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Big-endian: reverse the 2 input lanes; single-lane output needs no reorder.
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    // Single 64-bit lane on both sides, so the cast is endian-independent.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    // Single 64-bit lane on both sides, so the cast is endian-independent.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Big-endian: reverse the 8 input lanes; single-lane output needs no reorder.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Big-endian: reverse the 4 input lanes; single-lane output needs no reorder.
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Big-endian: the single-lane input needs no reorder; only the 2-lane
    // output is put back into target lane order.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    // Single 64-bit lane on both sides, so the cast is endian-independent.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    // Single 64-bit lane on both sides, so the cast is endian-independent.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    // Single 64-bit lane on both sides, so the cast is endian-independent.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Big-endian: reverse the 2 input lanes before the bit cast, then put the
    // 4 output lanes back into target lane order.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Pure bit-level reinterpret; on little-endian the lane layout already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then restore output lane order.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    // Thin binding to the LLVM intrinsic lowering to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    // Thin binding to the LLVM intrinsic lowering to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    // Thin binding to the LLVM intrinsic lowering to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    // There is no 1-lane vector form of the LLVM intrinsic, so extract the
    // scalar, round it via the scalar intrinsic, and rebuild the 1-lane vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
23303#[doc = "Floating-point round to 32-bit integer toward zero"]
23304#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
23305#[inline(always)]
23306#[target_feature(enable = "neon,frintts")]
23307#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23308#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
23309pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
23310 unsafe extern "unadjusted" {
23311 #[cfg_attr(
23312 any(target_arch = "aarch64", target_arch = "arm64ec"),
23313 link_name = "llvm.aarch64.neon.frint32z.v2f32"
23314 )]
23315 fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
23316 }
23317 unsafe { _vrnd32z_f32(a) }
23318}
23319#[doc = "Floating-point round to 32-bit integer toward zero"]
23320#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
23321#[inline(always)]
23322#[target_feature(enable = "neon,frintts")]
23323#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23324#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
23325pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
23326 unsafe extern "unadjusted" {
23327 #[cfg_attr(
23328 any(target_arch = "aarch64", target_arch = "arm64ec"),
23329 link_name = "llvm.aarch64.neon.frint32z.v4f32"
23330 )]
23331 fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
23332 }
23333 unsafe { _vrnd32zq_f32(a) }
23334}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    // Declare and call the LLVM intrinsic by link name; lowers to FRINT32Z.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    // Only a scalar f64 LLVM intrinsic exists here, so extract lane 0,
    // round it, and transmute the scalar result back into a 1-lane vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    // Declare and call the LLVM intrinsic by link name; lowers to FRINT64X.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) variant of vrnd64x_f32; same FRINT64X lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    // Declare and call the LLVM intrinsic by link name; lowers to FRINT64X.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    // Only a scalar f64 LLVM intrinsic exists here, so extract lane 0,
    // round it, and transmute the scalar result back into a 1-lane vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    // Declare and call the LLVM intrinsic by link name; lowers to FRINT64Z.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) variant of vrnd64z_f32; same FRINT64Z lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    // Declare and call the LLVM intrinsic by link name; lowers to FRINT64Z.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    // Only a scalar f64 LLVM intrinsic exists here, so extract lane 0,
    // round it, and transmute the scalar result back into a 1-lane vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise truncation via the portable SIMD intrinsic; lowers to FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    // Lane-wise truncation via the portable SIMD intrinsic; lowers to FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise truncation via the portable SIMD intrinsic; lowers to FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise truncation via the portable SIMD intrinsic; lowers to FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise truncation via the portable SIMD intrinsic; lowers to FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise truncation via the portable SIMD intrinsic; lowers to FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise round-half-away-from-zero; lowers to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    // Lane-wise round-half-away-from-zero; lowers to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise round-half-away-from-zero; lowers to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise round-half-away-from-zero; lowers to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise round-half-away-from-zero; lowers to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise round-half-away-from-zero; lowers to FRINTA.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    // Scalar f16 rounding helper (defined elsewhere in the crate); FRINTA.
    roundf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    // Scalar f16 truncation helper (defined elsewhere in the crate); FRINTZ.
    truncf16(a)
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    // Uses LLVM's generic `nearbyint` intrinsic; lowers to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    // Uses LLVM's generic `nearbyint` intrinsic; lowers to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    // Uses LLVM's generic `nearbyint` intrinsic; lowers to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    // Uses LLVM's generic `nearbyint` intrinsic; lowers to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    // Uses LLVM's generic `nearbyint` intrinsic; lowers to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    // Uses LLVM's generic `nearbyint` intrinsic; lowers to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    // Scalar variant: LLVM's generic `nearbyint` on f16; lowers to FRINTI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise floor via the portable SIMD intrinsic; lowers to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    // Lane-wise floor via the portable SIMD intrinsic; lowers to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise floor via the portable SIMD intrinsic; lowers to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise floor via the portable SIMD intrinsic; lowers to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise floor via the portable SIMD intrinsic; lowers to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise floor via the portable SIMD intrinsic; lowers to FRINTM.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    // Scalar f16 floor helper (defined elsewhere in the crate); FRINTM.
    floorf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    // Uses LLVM's generic `roundeven` intrinsic; lowers to FRINTN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    // Uses LLVM's generic `roundeven` intrinsic; lowers to FRINTN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndnq_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnh_f16(a: f16) -> f16 {
    // Summary corrected: this is `llvm.roundeven` / FRINTN (round to nearest,
    // ties to even), not round toward minus infinity as previously documented.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f16"
        )]
        fn _vrndnh_f16(a: f16) -> f16;
    }
    unsafe { _vrndnh_f16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    // Scalar variant: LLVM's generic `roundeven` on f32; lowers to FRINTN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise ceiling via the portable SIMD intrinsic; lowers to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    // Lane-wise ceiling via the portable SIMD intrinsic; lowers to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise ceiling via the portable SIMD intrinsic; lowers to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise ceiling via the portable SIMD intrinsic; lowers to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise ceiling via the portable SIMD intrinsic; lowers to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise ceiling via the portable SIMD intrinsic; lowers to FRINTP.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar f16 ceiling helper (defined elsewhere in the crate); FRINTP.
    ceilf16(a)
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise round-ties-to-even via the portable intrinsic; FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    // Lane-wise round-ties-to-even via the portable intrinsic; FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise round-ties-to-even via the portable intrinsic; FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise round-ties-to-even via the portable intrinsic; FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise round-ties-to-even via the portable intrinsic; FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise round-ties-to-even via the portable intrinsic; FRINTX.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxh_f16(a: f16) -> f16 {
    // Summary aligned with the vrndx_* siblings ("round to integral exact"),
    // as this asserts the same FRINTX instruction.
    // Scalar f16 helper defined elsewhere in the crate.
    round_ties_even_f16(a)
}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    // Scalar (doubleword) form of the signed rounding shift; lowers to SRSHL.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vrshld_s64(a, b) }
}
24021#[doc = "Unsigned rounding shift left"]
24022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
24023#[inline(always)]
24024#[target_feature(enable = "neon")]
24025#[cfg_attr(test, assert_instr(urshl))]
24026#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24027pub fn vrshld_u64(a: u64, b: i64) -> u64 {
24028 unsafe extern "unadjusted" {
24029 #[cfg_attr(
24030 any(target_arch = "aarch64", target_arch = "arm64ec"),
24031 link_name = "llvm.aarch64.neon.urshl.i64"
24032 )]
24033 fn _vrshld_u64(a: u64, b: i64) -> u64;
24034 }
24035 unsafe { _vrshld_u64(a, b) }
24036}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // Immediate shift amount is restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is expressed as a rounding left shift
    // by -N (SRSHL with a negative shift operand).
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // Same negative-shift trick as the signed variant, via URSHL.
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // N must not exceed the narrowed element width (8 bits here).
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b` with a rounding right shift, then concatenate: `a` becomes
    // the low half of the result and the narrowed `b` the high half.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Same narrow-and-concatenate pattern, 32-bit -> 16-bit lanes.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // 64-bit -> 32-bit lanes.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Unsigned counterpart of vrshrn_high_n_s16.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Unsigned counterpart of vrshrn_high_n_s32.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Unsigned counterpart of vrshrn_high_n_s64.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        // LLVM intrinsic symbol; the `.v1f64` suffix selects the
        // one-element f64 vector overload.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        // Two-element f64 vector overload of the same intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        // Scalar f64 overload (no vector suffix).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        // Scalar f32 overload.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        // Scalar f16 overload; gated on the fp16 feature and unavailable
        // on arm64ec.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        // FRSQRTS: the refinement-step instruction used together with
        // FRSQRTE (vrsqrte_*) to iterate toward 1/sqrt(x).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        // Two-element f64 vector overload.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        // Scalar f64 overload.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        // Scalar f32 overload.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        // Scalar f16 overload; fp16-gated, unavailable on arm64ec.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // Rounding-right-shift `b`, then accumulate into `a`. The accumulate
    // deliberately wraps on overflow (two's-complement), hence wrapping_add
    // rather than `+` (which would panic in debug builds).
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // Unsigned counterpart: modular (wrapping) accumulate as well.
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Narrow (b - c) with rounding via vrsubhn, then concatenate with `a`:
    // `a` fills the low half, the narrowed difference the high half.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Same pattern, 32-bit -> 16-bit lanes.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Same pattern, 64-bit -> 32-bit lanes.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Unsigned counterpart of vrsubhn_high_s16.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Unsigned counterpart of vrsubhn_high_s32.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Unsigned counterpart of vrsubhn_high_s64.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
// Big-endian copies of the vrsubhn_high_* intrinsics: the bodies are
// byte-for-byte identical to the little-endian definitions; only the
// expected instruction in the codegen test differs (rsubhn vs. rsubhn2).
// NOTE(review): presumably the shuffle lowers differently under big-endian
// lane ordering — confirm against actual codegen before relying on this.
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Narrow (b - c) with rounding, then concatenate with `a`.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t {
    // FSCALE adjusts each lane's exponent by the corresponding integer in
    // `vm` (per the doc above). Codegen assertion is skipped under the
    // msvc environment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f16"
        )]
        fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t;
    }
    unsafe { _vscale_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t {
    // 128-bit f16 variant (8 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v8f16"
        )]
        fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t;
    }
    unsafe { _vscaleq_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t {
    // 64-bit f32 variant (2 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f32"
        )]
        fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t;
    }
    unsafe { _vscale_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t {
    // 128-bit f32 variant (4 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f32"
        )]
        fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t;
    }
    unsafe { _vscaleq_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t {
    // 128-bit f64 variant (2 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
        )]
        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
    }
    unsafe { _vscaleq_f64(vn, vm) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // A 1-lane vector only has lane 0; enforced at compile time.
    static_assert!(LANE == 0);
    // Replace lane LANE of `b` with the scalar `a`.
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit, i.e. 0 or 1 for the 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        // Crypto-extension intrinsic; requires the sha3 target feature.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    // Scalar shift implemented via the 1-lane vector intrinsic:
    // transmute i64 <-> int64x1_t on the way in and out (same bit width).
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    // Same 1-lane-vector round-trip; `b` stays signed since its sign
    // selects the shift direction for USHL.
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    // Unlike the narrowing shifts, N = 0 is allowed here (plain widen).
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half of `a`, then widen-shift it left by N.
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // High half (lanes 4..8), widened 16 -> 32 bits.
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // High half (lanes 2..4), widened 32 -> 64 bits.
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Unsigned counterpart (USHLL2): zero-extends instead of
        // sign-extending when widening.
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Immediate shift must be 1..=8 for narrowing 16-bit to 8-bit lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Keep `a` as the low half and place the narrowed `b >> N` in the high half (SHRN2).
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Immediate shift must be 1..=16 for narrowing 32-bit to 16-bit lanes.
    static_assert!(N >= 1 && N <= 16);
    // Keep `a` as the low half and place the narrowed `b >> N` in the high half (SHRN2).
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Immediate shift must be 1..=32 for narrowing 64-bit to 32-bit lanes.
    static_assert!(N >= 1 && N <= 32);
    // Keep `a` as the low half and place the narrowed `b >> N` in the high half (SHRN2).
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Immediate shift must be 1..=8 for narrowing 16-bit to 8-bit lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Keep `a` as the low half and place the narrowed `b >> N` in the high half (SHRN2).
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Immediate shift must be 1..=16 for narrowing 32-bit to 16-bit lanes.
    static_assert!(N >= 1 && N <= 16);
    // Keep `a` as the low half and place the narrowed `b >> N` in the high half (SHRN2).
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Immediate shift must be 1..=32 for narrowing 64-bit to 32-bit lanes.
    static_assert!(N >= 1 && N <= 32);
    // Keep `a` as the low half and place the narrowed `b >> N` in the high half (SHRN2).
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Shift amount must be 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Shift amount must be 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Direct binding to the LLVM SLI intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Shift amount must be 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Shift amount must be 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI is sign-agnostic; signed and unsigned vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI only moves bits; polynomial and signed vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI only moves bits; polynomial and signed vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI only moves bits; polynomial and signed vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI only moves bits; polynomial and signed vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI only moves bits; polynomial and signed vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Shift amount must be 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI only moves bits; polynomial and signed vectors of the same shape
    // share a bit layout, so delegating through transmutes is a no-op reinterpretation.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Shift amount must be 0..=63 for a 64-bit element.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: i64 and int64x1_t are both 64 bits wide, so the transmutes reinterpret
    // the scalar as a one-lane vector (and back) without changing any bits.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Shift amount must be 0..=63 for a 64-bit element.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: u64 and uint64x1_t are both 64 bits wide, so the transmutes reinterpret
    // the scalar as a one-lane vector (and back) without changing any bits.
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Direct binding to the LLVM crypto intrinsic for SM3PARTW1.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available.
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Direct binding to the LLVM crypto intrinsic for SM3PARTW2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available.
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Direct binding to the LLVM crypto intrinsic for SM3SS1.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available.
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Immediate lane index must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Direct binding to the LLVM crypto intrinsic for SM3TT1A.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available;
    // the LLVM intrinsic takes the lane index as i64.
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Immediate lane index must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Direct binding to the LLVM crypto intrinsic for SM3TT1B.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available;
    // the LLVM intrinsic takes the lane index as i64.
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Immediate lane index must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Direct binding to the LLVM crypto intrinsic for SM3TT2A.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available;
    // the LLVM intrinsic takes the lane index as i64.
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Immediate lane index must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Direct binding to the LLVM crypto intrinsic for SM3TT2B.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available;
    // the LLVM intrinsic takes the lane index as i64.
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Direct binding to the LLVM crypto intrinsic for SM4EKEY.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available.
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Direct binding to the LLVM crypto intrinsic for SM4E.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon,sm4")` guarantees the instruction is available.
    unsafe { _vsm4eq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    // Direct binding to the LLVM USQADD intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    // Direct binding to the LLVM USQADD intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    // Direct binding to the LLVM USQADD intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    // Direct binding to the LLVM USQADD intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsqaddq_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    // Direct binding to the LLVM USQADD intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i32"
        )]
        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsqadd_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    // Direct binding to the LLVM USQADD intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i32"
        )]
        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsqaddq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    // Direct binding to the LLVM USQADD intrinsic for this vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v1i64"
        )]
        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
    }
    // SAFETY: `target_feature(enable = "neon")` guarantees the instruction is available.
    unsafe { _vsqadd_u64(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    // FFI declaration of the backing LLVM intrinsic (lowers to USQADD,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i64"
        )]
        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function (see `target_feature` above).
    unsafe { _vsqaddq_u64(a, b) }
}
25401#[doc = "Unsigned saturating accumulate of signed value"]
25402#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
25403#[inline(always)]
25404#[target_feature(enable = "neon")]
25405#[cfg_attr(test, assert_instr(usqadd))]
25406#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25407pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
25408 unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
25409}
25410#[doc = "Unsigned saturating accumulate of signed value"]
25411#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
25412#[inline(always)]
25413#[target_feature(enable = "neon")]
25414#[cfg_attr(test, assert_instr(usqadd))]
25415#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25416pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
25417 unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
25418}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    // Scalar form: binds directly to the `.i64` (non-vector) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function (see `target_feature` above).
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    // Scalar form: binds directly to the `.i32` (non-vector) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function (see `target_feature` above).
    unsafe { _vsqadds_u32(a, b) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise square root via the generic SIMD intrinsic; lowers to
    // FSQRT (see `assert_instr` above).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    // Lane-wise square root via the generic SIMD intrinsic; lowers to
    // FSQRT (see `assert_instr` above).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise square root via the generic SIMD intrinsic; lowers to
    // FSQRT (see `assert_instr` above).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise square root via the generic SIMD intrinsic; lowers to
    // FSQRT (see `assert_instr` above).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise square root via the generic SIMD intrinsic; lowers to
    // FSQRT (see `assert_instr` above).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise square root via the generic SIMD intrinsic; lowers to
    // FSQRT (see `assert_instr` above).
    unsafe { simd_fsqrt(a) }
}
25507#[doc = "Floating-point round to integral, using current rounding mode"]
25508#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
25509#[inline(always)]
25510#[target_feature(enable = "neon,fp16")]
25511#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25512#[cfg(not(target_arch = "arm64ec"))]
25513#[cfg_attr(test, assert_instr(fsqrt))]
25514pub fn vsqrth_f16(a: f16) -> f16 {
25515 sqrtf16(a)
25516}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // The immediate shift amount must be in 1..=8 for 8-bit lanes.
    static_assert!(N >= 1 && N <= 8);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i8"
        )]
        fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsri_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // The immediate shift amount must be in 1..=8 for 8-bit lanes.
    static_assert!(N >= 1 && N <= 8);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v16i8"
        )]
        fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsriq_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // The immediate shift amount must be in 1..=16 for 16-bit lanes.
    static_assert!(N >= 1 && N <= 16);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i16"
        )]
        fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsri_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // The immediate shift amount must be in 1..=16 for 16-bit lanes.
    static_assert!(N >= 1 && N <= 16);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i16"
        )]
        fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsriq_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // The immediate shift amount must be in 1..=32 for 32-bit lanes.
    static_assert!(N >= 1 && N <= 32);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i32"
        )]
        fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsri_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // The immediate shift amount must be in 1..=32 for 32-bit lanes.
    static_assert!(N >= 1 && N <= 32);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i32"
        )]
        fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsriq_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // The immediate shift amount must be in 1..=64 for 64-bit lanes.
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v1i64"
        )]
        fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsri_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // The immediate shift amount must be in 1..=64 for 64-bit lanes.
    static_assert!(N >= 1 && N <= 64);
    // FFI declaration of the backing LLVM intrinsic (lowers to SRI,
    // per the `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i64"
        )]
        fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: the `neon` target feature this intrinsic requires is enabled
    // on this function, and N was validated above.
    unsafe { _vsriq_n_s64(a, b, N) }
}
25661#[doc = "Shift Right and Insert (immediate)"]
25662#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
25663#[inline(always)]
25664#[target_feature(enable = "neon")]
25665#[cfg_attr(test, assert_instr(sri, N = 1))]
25666#[rustc_legacy_const_generics(2)]
25667#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25668pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
25669 static_assert!(N >= 1 && N <= 8);
25670 unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25671}
25672#[doc = "Shift Right and Insert (immediate)"]
25673#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
25674#[inline(always)]
25675#[target_feature(enable = "neon")]
25676#[cfg_attr(test, assert_instr(sri, N = 1))]
25677#[rustc_legacy_const_generics(2)]
25678#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25679pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
25680 static_assert!(N >= 1 && N <= 8);
25681 unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25682}
25683#[doc = "Shift Right and Insert (immediate)"]
25684#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
25685#[inline(always)]
25686#[target_feature(enable = "neon")]
25687#[cfg_attr(test, assert_instr(sri, N = 1))]
25688#[rustc_legacy_const_generics(2)]
25689#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25690pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
25691 static_assert!(N >= 1 && N <= 16);
25692 unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25693}
25694#[doc = "Shift Right and Insert (immediate)"]
25695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
25696#[inline(always)]
25697#[target_feature(enable = "neon")]
25698#[cfg_attr(test, assert_instr(sri, N = 1))]
25699#[rustc_legacy_const_generics(2)]
25700#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25701pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
25702 static_assert!(N >= 1 && N <= 16);
25703 unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25704}
25705#[doc = "Shift Right and Insert (immediate)"]
25706#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
25707#[inline(always)]
25708#[target_feature(enable = "neon")]
25709#[cfg_attr(test, assert_instr(sri, N = 1))]
25710#[rustc_legacy_const_generics(2)]
25711#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25712pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
25713 static_assert!(N >= 1 && N <= 32);
25714 unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
25715}
25716#[doc = "Shift Right and Insert (immediate)"]
25717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
25718#[inline(always)]
25719#[target_feature(enable = "neon")]
25720#[cfg_attr(test, assert_instr(sri, N = 1))]
25721#[rustc_legacy_const_generics(2)]
25722#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25723pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
25724 static_assert!(N >= 1 && N <= 32);
25725 unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
25726}
25727#[doc = "Shift Right and Insert (immediate)"]
25728#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
25729#[inline(always)]
25730#[target_feature(enable = "neon")]
25731#[cfg_attr(test, assert_instr(sri, N = 1))]
25732#[rustc_legacy_const_generics(2)]
25733#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25734pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
25735 static_assert!(N >= 1 && N <= 64);
25736 unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25737}
25738#[doc = "Shift Right and Insert (immediate)"]
25739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
25740#[inline(always)]
25741#[target_feature(enable = "neon")]
25742#[cfg_attr(test, assert_instr(sri, N = 1))]
25743#[rustc_legacy_const_generics(2)]
25744#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25745pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
25746 static_assert!(N >= 1 && N <= 64);
25747 unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25748}
25749#[doc = "Shift Right and Insert (immediate)"]
25750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
25751#[inline(always)]
25752#[target_feature(enable = "neon")]
25753#[cfg_attr(test, assert_instr(sri, N = 1))]
25754#[rustc_legacy_const_generics(2)]
25755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25756pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
25757 static_assert!(N >= 1 && N <= 8);
25758 unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25759}
25760#[doc = "Shift Right and Insert (immediate)"]
25761#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
25762#[inline(always)]
25763#[target_feature(enable = "neon")]
25764#[cfg_attr(test, assert_instr(sri, N = 1))]
25765#[rustc_legacy_const_generics(2)]
25766#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25767pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
25768 static_assert!(N >= 1 && N <= 8);
25769 unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25770}
25771#[doc = "Shift Right and Insert (immediate)"]
25772#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
25773#[inline(always)]
25774#[target_feature(enable = "neon")]
25775#[cfg_attr(test, assert_instr(sri, N = 1))]
25776#[rustc_legacy_const_generics(2)]
25777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25778pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
25779 static_assert!(N >= 1 && N <= 16);
25780 unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25781}
25782#[doc = "Shift Right and Insert (immediate)"]
25783#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
25784#[inline(always)]
25785#[target_feature(enable = "neon")]
25786#[cfg_attr(test, assert_instr(sri, N = 1))]
25787#[rustc_legacy_const_generics(2)]
25788#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25789pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
25790 static_assert!(N >= 1 && N <= 16);
25791 unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25792}
25793#[doc = "Shift Right and Insert (immediate)"]
25794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
25795#[inline(always)]
25796#[target_feature(enable = "neon,aes")]
25797#[cfg_attr(test, assert_instr(sri, N = 1))]
25798#[rustc_legacy_const_generics(2)]
25799#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25800pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
25801 static_assert!(N >= 1 && N <= 64);
25802 unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25803}
25804#[doc = "Shift Right and Insert (immediate)"]
25805#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
25806#[inline(always)]
25807#[target_feature(enable = "neon,aes")]
25808#[cfg_attr(test, assert_instr(sri, N = 1))]
25809#[rustc_legacy_const_generics(2)]
25810#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25811pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
25812 static_assert!(N >= 1 && N <= 64);
25813 unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25814}
25815#[doc = "Shift right and insert"]
25816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
25817#[inline(always)]
25818#[target_feature(enable = "neon")]
25819#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25820#[rustc_legacy_const_generics(2)]
25821#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25822pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
25823 static_assert!(N >= 1 && N <= 64);
25824 unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25825}
25826#[doc = "Shift right and insert"]
25827#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
25828#[inline(always)]
25829#[target_feature(enable = "neon")]
25830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25831#[rustc_legacy_const_generics(2)]
25832#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25833pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
25834 static_assert!(N >= 1 && N <= 64);
25835 unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
25836}
25837#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25838#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
25839#[doc = "## Safety"]
25840#[doc = " * Neon intrinsic unsafe"]
25841#[inline(always)]
25842#[target_feature(enable = "neon,fp16")]
25843#[cfg_attr(test, assert_instr(str))]
25844#[allow(clippy::cast_ptr_alignment)]
25845#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25846#[cfg(not(target_arch = "arm64ec"))]
25847pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
25848 crate::ptr::write_unaligned(ptr.cast(), a)
25849}
25850#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
25852#[doc = "## Safety"]
25853#[doc = " * Neon intrinsic unsafe"]
25854#[inline(always)]
25855#[target_feature(enable = "neon,fp16")]
25856#[cfg_attr(test, assert_instr(str))]
25857#[allow(clippy::cast_ptr_alignment)]
25858#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25859#[cfg(not(target_arch = "arm64ec"))]
25860pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
25861 crate::ptr::write_unaligned(ptr.cast(), a)
25862}
25863#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25864#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
25865#[doc = "## Safety"]
25866#[doc = " * Neon intrinsic unsafe"]
25867#[inline(always)]
25868#[target_feature(enable = "neon")]
25869#[cfg_attr(test, assert_instr(str))]
25870#[allow(clippy::cast_ptr_alignment)]
25871#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25872pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
25873 crate::ptr::write_unaligned(ptr.cast(), a)
25874}
25875#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25876#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
25877#[doc = "## Safety"]
25878#[doc = " * Neon intrinsic unsafe"]
25879#[inline(always)]
25880#[target_feature(enable = "neon")]
25881#[cfg_attr(test, assert_instr(str))]
25882#[allow(clippy::cast_ptr_alignment)]
25883#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25884pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
25885 crate::ptr::write_unaligned(ptr.cast(), a)
25886}
25887#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
25889#[doc = "## Safety"]
25890#[doc = " * Neon intrinsic unsafe"]
25891#[inline(always)]
25892#[target_feature(enable = "neon")]
25893#[cfg_attr(test, assert_instr(str))]
25894#[allow(clippy::cast_ptr_alignment)]
25895#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25896pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
25897 crate::ptr::write_unaligned(ptr.cast(), a)
25898}
25899#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
25901#[doc = "## Safety"]
25902#[doc = " * Neon intrinsic unsafe"]
25903#[inline(always)]
25904#[target_feature(enable = "neon")]
25905#[cfg_attr(test, assert_instr(str))]
25906#[allow(clippy::cast_ptr_alignment)]
25907#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25908pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
25909 crate::ptr::write_unaligned(ptr.cast(), a)
25910}
25911#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
25913#[doc = "## Safety"]
25914#[doc = " * Neon intrinsic unsafe"]
25915#[inline(always)]
25916#[target_feature(enable = "neon")]
25917#[cfg_attr(test, assert_instr(str))]
25918#[allow(clippy::cast_ptr_alignment)]
25919#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25920pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
25921 crate::ptr::write_unaligned(ptr.cast(), a)
25922}
25923#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
25925#[doc = "## Safety"]
25926#[doc = " * Neon intrinsic unsafe"]
25927#[inline(always)]
25928#[target_feature(enable = "neon")]
25929#[cfg_attr(test, assert_instr(str))]
25930#[allow(clippy::cast_ptr_alignment)]
25931#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25932pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
25933 crate::ptr::write_unaligned(ptr.cast(), a)
25934}
25935#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25936#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
25937#[doc = "## Safety"]
25938#[doc = " * Neon intrinsic unsafe"]
25939#[inline(always)]
25940#[target_feature(enable = "neon")]
25941#[cfg_attr(test, assert_instr(str))]
25942#[allow(clippy::cast_ptr_alignment)]
25943#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25944pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
25945 crate::ptr::write_unaligned(ptr.cast(), a)
25946}
25947#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25948#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
25949#[doc = "## Safety"]
25950#[doc = " * Neon intrinsic unsafe"]
25951#[inline(always)]
25952#[target_feature(enable = "neon")]
25953#[cfg_attr(test, assert_instr(str))]
25954#[allow(clippy::cast_ptr_alignment)]
25955#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25956pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
25957 crate::ptr::write_unaligned(ptr.cast(), a)
25958}
25959#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
25961#[doc = "## Safety"]
25962#[doc = " * Neon intrinsic unsafe"]
25963#[inline(always)]
25964#[target_feature(enable = "neon")]
25965#[cfg_attr(test, assert_instr(str))]
25966#[allow(clippy::cast_ptr_alignment)]
25967#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25968pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
25969 crate::ptr::write_unaligned(ptr.cast(), a)
25970}
25971#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25972#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
25973#[doc = "## Safety"]
25974#[doc = " * Neon intrinsic unsafe"]
25975#[inline(always)]
25976#[target_feature(enable = "neon")]
25977#[cfg_attr(test, assert_instr(str))]
25978#[allow(clippy::cast_ptr_alignment)]
25979#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25980pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
25981 crate::ptr::write_unaligned(ptr.cast(), a)
25982}
25983#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25984#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
25985#[doc = "## Safety"]
25986#[doc = " * Neon intrinsic unsafe"]
25987#[inline(always)]
25988#[target_feature(enable = "neon")]
25989#[cfg_attr(test, assert_instr(str))]
25990#[allow(clippy::cast_ptr_alignment)]
25991#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25992pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
25993 crate::ptr::write_unaligned(ptr.cast(), a)
25994}
25995#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25996#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
25997#[doc = "## Safety"]
25998#[doc = " * Neon intrinsic unsafe"]
25999#[inline(always)]
26000#[target_feature(enable = "neon")]
26001#[cfg_attr(test, assert_instr(str))]
26002#[allow(clippy::cast_ptr_alignment)]
26003#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26004pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
26005 crate::ptr::write_unaligned(ptr.cast(), a)
26006}
26007#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
26008#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
26009#[doc = "## Safety"]
26010#[doc = " * Neon intrinsic unsafe"]
26011#[inline(always)]
26012#[target_feature(enable = "neon")]
26013#[cfg_attr(test, assert_instr(str))]
26014#[allow(clippy::cast_ptr_alignment)]
26015#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26016pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
26017 crate::ptr::write_unaligned(ptr.cast(), a)
26018}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
// Polynomial-64 types are gated on the `aes` feature in addition to `neon`.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
// Polynomial-64 types are gated on the `aes` feature in addition to `neon`.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    // Unaligned whole-vector store; `ptr.cast()` reinterprets the element pointer as a vector pointer.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    // Unpack the register tuple and forward; the destination pointer goes last.
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    // Unpack the register tuple and forward; the destination pointer goes last.
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    // Unpack the register tuple and forward; the destination pointer goes last.
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    // Unpack the register tuple and forward; the destination pointer goes last.
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    // Unpack the register tuple and forward; the destination pointer goes last.
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    // Unpack the register tuple and forward; the destination pointer goes last.
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Extract the selected lane and store it as a plain scalar write.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // LANE is a 1-bit index: only lanes 0 and 1 exist in a float64x2_t.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and store it as a plain scalar write.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
// Polynomial-64 types are gated on the `aes` feature in addition to `neon`.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst2q_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // LANE is a 1-bit index: only lanes 0 and 1 exist.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // LANE is a 4-bit index: lanes 0..=15 of the 16-byte vectors.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // LANE is a 1-bit index: only lanes 0 and 1 exist.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
// Polynomial-64 types are gated on the `aes` feature in addition to `neon`.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    // LANE is a 1-bit index: only lanes 0 and 1 exist.
    static_assert_uimm_bits!(LANE, 1);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    // LANE is a 4-bit index: lanes 0..=15 of the 16-byte vectors.
    static_assert_uimm_bits!(LANE, 4);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    // LANE is a 1-bit index: only lanes 0 and 1 exist.
    static_assert_uimm_bits!(LANE, 1);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    // LANE is a 4-bit index: lanes 0..=15 of the 16-byte vectors.
    static_assert_uimm_bits!(LANE, 4);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
// Polynomial-64 types are gated on the `aes` feature in addition to `neon`.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v1f64.p0"
        )]
        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst3_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Polynomial-64 types are gated on the `aes` feature in addition to `neon`.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Same bit layout as the signed variant, so reinterpret and delegate.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2f64.p0"
        )]
        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst3q_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2i64.p0"
        )]
        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst3q_s64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    // LANE is a 1-bit index: only lanes 0 and 1 exist.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        // FFI binding for the LLVM lane-store intrinsic named in `link_name`.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    // `a as _` erases the element type: the intrinsic takes an untyped byte pointer.
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Stores element `LANE` of each of the three vectors in `b` to `a`.
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // 4-bit immediate: only LANE values 0..=15 are valid for a 16-lane vector.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    // The LLVM intrinsic takes the lane as an i64 and an untyped byte pointer.
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Stores element `LANE` of each of the three vectors in `b` to `a`.
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // 1-bit immediate: only LANE values 0..=1 are valid for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    // The LLVM intrinsic takes the lane as an i64 and an untyped byte pointer.
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Polynomial variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Unsigned variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Unsigned variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Polynomial variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
// Polynomial variant: bit-identical to the signed form, delegate via transmute.
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
// Unsigned variant: bit-identical to the signed form, delegate via transmute.
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 1-lane vectors: no interleaving work is needed, so no specific instruction
// is asserted (`nop`).
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    // Direct binding to LLVM's st4 intrinsic for v1f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `as _` cast.
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Stores element `LANE` of each of the four vectors in `b` to `a`.
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // 1-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The LLVM intrinsic takes the lane as an i64 and an untyped byte pointer.
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Stores element `LANE` of each of the four vectors in `b` to `a`.
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // 1-lane vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The LLVM intrinsic takes the lane as an i64 and an untyped byte pointer.
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Polynomial variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane constraint is re-checked here for a clear error.
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Unsigned variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane constraint is re-checked here for a clear error.
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
// Stores the four 2-lane f64 vectors in `b` to `a` via the AArch64 ST4
// instruction.
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    // Direct binding to LLVM's st4 intrinsic for v2f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
        )]
        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `as _` cast.
    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
// Stores the four 2-lane i64 vectors in `b` to `a` via the AArch64 ST4
// instruction.
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    // Direct binding to LLVM's st4 intrinsic for v2i64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
        )]
        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    // The intrinsic takes an untyped byte pointer, hence the `as _` cast.
    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Stores element `LANE` of each of the four vectors in `b` to `a`.
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // 1-bit immediate: only LANE values 0..=1 are valid for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The LLVM intrinsic takes the lane as an i64 and an untyped byte pointer.
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Stores element `LANE` of each of the four vectors in `b` to `a`.
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    // 4-bit immediate: only LANE values 0..=15 are valid for a 16-lane vector.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The LLVM intrinsic takes the lane as an i64 and an untyped byte pointer.
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Stores element `LANE` of each of the four vectors in `b` to `a`.
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    // 1-bit immediate: only LANE values 0..=1 are valid for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The LLVM intrinsic takes the lane as an i64 and an untyped byte pointer.
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Polynomial variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Unsigned variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Unsigned variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
// Polynomial variant: same bit layout as the signed one, so delegate via
// bitwise `transmute`. The lane range is re-checked here for a clear error.
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
// Polynomial variant: bit-identical to the signed form, delegate via transmute.
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
// Unsigned variant: bit-identical to the signed form, delegate via transmute.
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// f64 variant: reinterpret the vector's bits as i64 and reuse the signed
// implementation; the store is bit-pattern-preserving.
pub fn vstl1_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x1_t) {
    static_assert!(LANE == 0);
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// f64 variant: reinterpret the vector's bits as i64 and reuse the signed
// implementation; the store is bit-pattern-preserving.
pub fn vstl1q_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// Unsigned variant: same bit layout as the signed one, so delegate via
// transmute and a pointer cast.
pub fn vstl1_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x1_t) {
    static_assert!(LANE == 0);
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// Unsigned variant: same bit layout as the signed one, so delegate via
// transmute and a pointer cast.
pub fn vstl1q_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// Polynomial variant: same bit layout as the signed one, so delegate via
// transmute and a pointer cast.
pub fn vstl1_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x1_t) {
    static_assert!(LANE == 0);
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// Polynomial variant: same bit layout as the signed one, so delegate via
// transmute and a pointer cast.
pub fn vstl1q_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// Implemented as an atomic Release store so it lowers to the STL1
// store-release instruction asserted above.
pub fn vstl1_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x1_t) {
    // 1-lane vector: the only valid lane index is 0.
    static_assert!(LANE == 0);
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    unsafe {
        // Extract the selected lane, then publish it with Release ordering.
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
// Implemented as an atomic Release store so it lowers to the STL1
// store-release instruction asserted above.
pub fn vstl1q_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x2_t) {
    // 1-bit immediate: only LANE values 0..=1 are valid for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    unsafe {
        // Extract the selected lane, then publish it with Release ordering.
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
// Element-wise `a - b` via the portable simd_sub intrinsic.
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
// Element-wise `a - b` via the portable simd_sub intrinsic.
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
// Scalar form: wrapping (two's-complement) subtraction — overflow wraps
// rather than panicking.
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
// Scalar form: wrapping subtraction — underflow wraps rather than panicking.
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
// Not provided on arm64ec (f16 support is gated out there).
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsub))]
// Scalar half-precision subtraction.
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
// Subtracts the high halves of the inputs after widening each element to the
// next larger type, so the subtraction itself cannot wrap.
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the high half (lanes 8..=15) of each input, then widen.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        // Subtract in the wider element type.
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
// Subtracts the high halves of the inputs after widening each element to the
// next larger type, so the subtraction itself cannot wrap.
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the high half (lanes 4..=7) of each input, then widen.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        // Subtract in the wider element type.
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
// Subtracts the high halves of the inputs after widening each element to the
// next larger type, so the subtraction itself cannot wrap.
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the high half (lanes 2..=3) of each input, then widen.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        // Subtract in the wider element type.
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
// Subtracts the high halves of the inputs after widening each element to the
// next larger type; the result is computed in the wider type.
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the high half (lanes 8..=15) of each input, then widen.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        // Subtract in the wider element type.
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
// Subtracts the high halves of the inputs after widening each element to the
// next larger type; the result is computed in the wider type.
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the high half (lanes 4..=7) of each input, then widen.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        // Subtract in the wider element type.
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
// Subtracts the high halves of the inputs after widening each element to the
// next larger type; the result is computed in the wider type.
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the high half (lanes 2..=3) of each input, then widen.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        // Subtract in the wider element type.
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
// Widens the high half of `b` to `a`'s element width, then subtracts it
// from `a` element-wise.
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // High half of `b` (lanes 8..=15), widened inline via simd_cast.
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
// Widens the high half of `b` to `a`'s element width, then subtracts it
// from `a` element-wise.
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // High half of `b` (lanes 4..=7), widened inline via simd_cast.
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
// Widens the high half of `b` to `a`'s element width, then subtracts it
// from `a` element-wise.
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // High half of `b` (lanes 2..=3), widened inline via simd_cast.
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
// Widens the high half of `b` to `a`'s element width, then subtracts it
// from `a` element-wise.
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // High half of `b` (lanes 8..=15), widened inline via simd_cast.
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
27398#[doc = "Unsigned Subtract Wide"]
27399#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
27400#[inline(always)]
27401#[target_feature(enable = "neon")]
27402#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27403#[cfg_attr(test, assert_instr(usubw2))]
27404pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
27405 unsafe {
27406 let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
27407 simd_sub(a, simd_cast(c))
27408 }
27409}
27410#[doc = "Unsigned Subtract Wide"]
27411#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
27412#[inline(always)]
27413#[target_feature(enable = "neon")]
27414#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27415#[cfg_attr(test, assert_instr(usubw2))]
27416pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
27417 unsafe {
27418 let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
27419 simd_sub(a, simd_cast(c))
27420 }
27421}
27422#[doc = "Table look-up"]
27423#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
27424#[inline(always)]
27425#[target_feature(enable = "neon")]
27426#[cfg_attr(test, assert_instr(tbl))]
27427#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27428pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
27429 vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
27430 {
27431 transmute(b)
27432 }
27433 })
27434}
27435#[doc = "Table look-up"]
27436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
27437#[inline(always)]
27438#[target_feature(enable = "neon")]
27439#[cfg_attr(test, assert_instr(tbl))]
27440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27441pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
27442 vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
27443}
27444#[doc = "Table look-up"]
27445#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
27446#[inline(always)]
27447#[target_feature(enable = "neon")]
27448#[cfg_attr(test, assert_instr(tbl))]
27449#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27450pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
27451 vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
27452}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // Concatenate the two 8-byte table halves into one 128-bit register and
    // perform a single quad-register lookup; the `transmute`s only
    // reinterpret bytes between signed/unsigned vector views.
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Concatenate the two 8-byte table halves into one 128-bit register and
    // perform a single quad-register lookup.
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: the in-register lane order is reversed relative to
    // little-endian, so reverse the lanes of every input, run the same
    // lookup, then reverse the result back.
    let mut a: uint8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Concatenate the two 8-byte table halves into one 128-bit register and
    // perform a single quad-register lookup.
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lanes of every input, run the
    // little-endian lookup, then reverse the result back.
    let mut a: poly8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // Pack the three 8-byte table halves into a 2 x 128-bit table, padding
    // the unused fourth 8-byte slot with zeros, then do a two-register
    // lookup.
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the three 8-byte table halves into a 2 x 128-bit table, padding
    // the unused fourth 8-byte slot with zeros, then do a two-register
    // lookup.
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lanes of all inputs first, then build
    // the zero-padded 2 x 128-bit table, do the lookup, and reverse the
    // result back.
    let mut a: uint8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Pack the three 8-byte table halves into a 2 x 128-bit table, padding
    // the unused fourth 8-byte slot with zeros, then do a two-register
    // lookup.
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lanes of all inputs first, then build
    // the zero-padded 2 x 128-bit table, do the lookup, and reverse the
    // result back.
    let mut a: poly8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // The four 8-byte table halves exactly fill a 2 x 128-bit table, so no
    // zero padding is needed before the two-register lookup.
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // The four 8-byte table halves exactly fill a 2 x 128-bit table, so no
    // zero padding is needed before the two-register lookup.
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lanes of all four table halves and the
    // index vector, run the little-endian lookup, then reverse the result
    // back.
    let mut a: uint8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // The four 8-byte table halves exactly fill a 2 x 128-bit table, so no
    // zero padding is needed before the two-register lookup.
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lanes of all four table halves and the
    // index vector, run the little-endian lookup, then reverse the result
    // back.
    let mut a: poly8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    // Indices 0..=7 select bytes from the table `b` (zero-extended to 128
    // bits for the quad-register TBX); the `simd_lt` mask against 8 makes
    // every lane with a larger index keep the corresponding lane of the
    // fallback vector `a`.
    unsafe {
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    // Indices 0..=7 select bytes from the table `b` (zero-extended to 128
    // bits); lanes whose index is >= 8 keep the corresponding lane of the
    // fallback vector `a` via the `simd_lt` mask.
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    // Indices 0..=7 select bytes from the table `b` (zero-extended to 128
    // bits); lanes whose index is >= 8 keep the corresponding lane of the
    // fallback vector `a` via the `simd_lt` mask.
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // The two 8-byte halves form a full 16-byte table, matching the index
    // range of the single-register TBX, so no extra select mask is needed
    // (TBX itself keeps lanes of `a` for out-of-range indices).
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // The two 8-byte halves form a full 16-byte table, so the
    // single-register TBX needs no extra masking.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lanes of the fallback, both table
    // halves, and the indices, run the little-endian lookup, then reverse
    // the result back.
    let mut b: uint8x8x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // The two 8-byte halves form a full 16-byte table, so the
    // single-register TBX needs no extra masking.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lanes of the fallback, both table
    // halves, and the indices, run the little-endian lookup, then reverse
    // the result back.
    let mut b: poly8x8x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // Pack the three 8-byte table halves into a 2 x 128-bit table with the
    // fourth slot zero-padded. Only indices 0..=23 hit real table bytes, so
    // the `simd_lt` mask against 24 keeps lanes of `a` for any larger index
    // (which would otherwise read the zero padding at bytes 24..=31).
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Pack the three 8-byte table halves into a 2 x 128-bit table with the
    // fourth slot zero-padded; the `simd_lt` mask against 24 keeps lanes of
    // `a` for indices beyond the 24 valid table bytes.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lanes of every input, run the
    // little-endian masked lookup (indices >= 24 keep lanes of `a`), then
    // reverse the result back.
    let mut b: uint8x8x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Pack the three 8-byte table halves into a 2 x 128-bit table with the
    // fourth slot zero-padded; the `simd_lt` mask against 24 keeps lanes of
    // `a` for indices beyond the 24 valid table bytes.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lanes of every input, run the
    // little-endian masked lookup (indices >= 24 keep lanes of `a`), then
    // reverse the result back.
    let mut b: poly8x8x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // The four 8-byte halves form a full 32-byte table, exactly covering the
    // index range of the two-register TBX, so no select mask is needed (TBX
    // keeps lanes of `a` for out-of-range indices).
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // The four 8-byte halves form a full 32-byte table, so the two-register
    // TBX needs no extra masking.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lanes of every input, run the
    // little-endian lookup, then reverse the result back.
    let mut b: uint8x8x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // The four 8-byte halves form a full 32-byte table, so the two-register
    // TBX needs no extra masking.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse the lanes of every input, run the
    // little-endian lookup, then reverse the result back.
    let mut b: poly8x8x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
28015#[doc = "Transpose vectors"]
28016#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
28017#[inline(always)]
28018#[target_feature(enable = "neon,fp16")]
28019#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
28020#[cfg(not(target_arch = "arm64ec"))]
28021#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28022pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
28023 unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28024}
28025#[doc = "Transpose vectors"]
28026#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
28027#[inline(always)]
28028#[target_feature(enable = "neon,fp16")]
28029#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
28030#[cfg(not(target_arch = "arm64ec"))]
28031#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28032pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
28033 unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28034}
28035#[doc = "Transpose vectors"]
28036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
28037#[inline(always)]
28038#[target_feature(enable = "neon")]
28039#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28040#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28041pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
28042 unsafe { simd_shuffle!(a, b, [0, 2]) }
28043}
28044#[doc = "Transpose vectors"]
28045#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
28046#[inline(always)]
28047#[target_feature(enable = "neon")]
28048#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28049#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28050pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
28051 unsafe { simd_shuffle!(a, b, [0, 2]) }
28052}
28053#[doc = "Transpose vectors"]
28054#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
28055#[inline(always)]
28056#[target_feature(enable = "neon")]
28057#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28058#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28059pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
28060 unsafe { simd_shuffle!(a, b, [0, 2]) }
28061}
28062#[doc = "Transpose vectors"]
28063#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
28064#[inline(always)]
28065#[target_feature(enable = "neon")]
28066#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28067#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28068pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
28069 unsafe { simd_shuffle!(a, b, [0, 2]) }
28070}
28071#[doc = "Transpose vectors"]
28072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
28073#[inline(always)]
28074#[target_feature(enable = "neon")]
28075#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28076#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28077pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
28078 unsafe { simd_shuffle!(a, b, [0, 2]) }
28079}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: in-range constant shuffle; picks lane 0 of `a` and lane 0 of `b`
    // (TRN1 coincides with ZIP1 for 2-lane vectors, hence the `zip1` assertion).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // SAFETY: in-range constant shuffle; picks lane 0 of `a` and lane 0 of `b`
    // (TRN1 coincides with ZIP1 for 2-lane vectors, hence the `zip1` assertion).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        // SAFETY: in-range constant shuffle; even lanes of `a` interleaved
        // with even lanes of `b`.
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        // SAFETY: in-range constant shuffle; even lanes of `a` interleaved
        // with even lanes of `b`.
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        // SAFETY: in-range constant shuffle; even lanes of `a` interleaved
        // with even lanes of `b`.
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // SAFETY: in-range constant shuffle; even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // SAFETY: in-range constant shuffle; picks lane 1 of `a` and lane 1 of `b`
    // (TRN2 coincides with ZIP2 for 2-lane vectors, hence the `zip2` assertion).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: in-range constant shuffle; picks lane 1 of `a` and lane 1 of `b`
    // (TRN2 coincides with ZIP2 for 2-lane vectors, hence the `zip2` assertion).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // SAFETY: in-range constant shuffle; picks lane 1 of `a` and lane 1 of `b`
    // (TRN2 coincides with ZIP2 for 2-lane vectors, hence the `zip2` assertion).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // SAFETY: in-range constant shuffle; picks lane 1 of `a` and lane 1 of `b`
    // (TRN2 coincides with ZIP2 for 2-lane vectors, hence the `zip2` assertion).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // SAFETY: in-range constant shuffle; picks lane 1 of `a` and lane 1 of `b`
    // (TRN2 coincides with ZIP2 for 2-lane vectors, hence the `zip2` assertion).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: in-range constant shuffle; picks lane 1 of `a` and lane 1 of `b`
    // (TRN2 coincides with ZIP2 for 2-lane vectors, hence the `zip2` assertion).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // SAFETY: in-range constant shuffle; picks lane 1 of `a` and lane 1 of `b`
    // (TRN2 coincides with ZIP2 for 2-lane vectors, hence the `zip2` assertion).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved
        // with odd lanes of `b`.
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved
        // with odd lanes of `b`.
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved
        // with odd lanes of `b`.
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // SAFETY: in-range constant shuffle; odd lanes of `a` interleaved with odd lanes of `b`.
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Each lane is all-ones when `a & b` has any bit set, else all-zeros.
    // SAFETY: lane-wise SIMD ops on matching types; `transmute` reinterprets
    // the zero vector between same-size SIMD representations.
    unsafe {
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // Each lane is all-ones when `a & b` has any bit set, else all-zeros.
    // SAFETY: lane-wise SIMD ops on matching types; `transmute` reinterprets
    // the zero vector between same-size SIMD representations.
    unsafe {
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // Each lane is all-ones when `a & b` has any bit set, else all-zeros.
    // SAFETY: lane-wise SIMD ops on matching types; `transmute` reinterprets
    // the zero vector between same-size SIMD representations.
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // Each lane is all-ones when `a & b` has any bit set, else all-zeros.
    // SAFETY: lane-wise SIMD ops on matching types; `transmute` reinterprets
    // the zero vector between same-size SIMD representations.
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Each lane is all-ones when `a & b` has any bit set, else all-zeros.
    // SAFETY: lane-wise SIMD ops on matching types; `transmute` reinterprets
    // the zero vector between same-size SIMD representations.
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Each lane is all-ones when `a & b` has any bit set, else all-zeros.
    // SAFETY: lane-wise SIMD ops on matching types; `transmute` reinterprets
    // the zero vector between same-size SIMD representations.
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: reinterpret each i64 as a 1-lane vector, reuse vtst_s64,
    // then reinterpret the 1-lane mask back to u64.
    // SAFETY: every `transmute` here is between 64-bit values of identical size.
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    // Scalar form: reinterpret each u64 as a 1-lane vector, reuse vtst_u64,
    // then reinterpret the 1-lane mask back to u64.
    // SAFETY: every `transmute` here is between 64-bit values of identical size.
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    // Thin wrapper over the LLVM `suqadd` intrinsic for this vector width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic takes plain SIMD values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    // Scalar form: broadcast both operands, do the vector saturating add, take lane 0.
    // SAFETY: `simd_extract!` with constant index 0 is in range for the 8-lane result.
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    // Scalar form: broadcast both operands, do the vector saturating add, take lane 0.
    // SAFETY: `simd_extract!` with constant index 0 is in range for the 4-lane result.
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    // Scalar (64-bit) form binds the scalar LLVM `suqadd` intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    // SAFETY: the intrinsic takes plain scalar values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    // Scalar (32-bit) form binds the scalar LLVM `suqadd` intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    // SAFETY: the intrinsic takes plain scalar values; the required `neon`
    // target feature is enabled on this function.
    unsafe { _vuqadds_s32(a, b) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes UZP1 (lane 0 of a, lane 0 of b) is identical to
    // ZIP1, so codegen is checked against zip1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes UZP1 is identical to ZIP1; codegen checked as zip1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes UZP1 is identical to ZIP1; codegen checked as zip1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes UZP1 is identical to ZIP1; codegen checked as zip1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes UZP1 is identical to ZIP1; codegen checked as zip1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes UZP1 is identical to ZIP1; codegen checked as zip1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes UZP1 is identical to ZIP1; codegen checked as zip1.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP1: even-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes UZP2 (lane 1 of a, lane 1 of b) is identical to
    // ZIP2, so codegen is checked against zip2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes UZP2 is identical to ZIP2; codegen checked as zip2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes UZP2 is identical to ZIP2; codegen checked as zip2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes UZP2 is identical to ZIP2; codegen checked as zip2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes UZP2 is identical to ZIP2; codegen checked as zip2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes UZP2 is identical to ZIP2; codegen checked as zip2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes UZP2 is identical to ZIP2; codegen checked as zip2.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP2: odd-numbered lanes of the concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // The rotate amount is an immediate: compile-time check that IMM6 fits in
    // 6 bits (0..=63) before handing it to the LLVM intrinsic as an i64.
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP1 on two lanes: lane 0 of a followed by lane 0 of b.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP1 on two lanes: lane 0 of a followed by lane 0 of b.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP1 on two lanes: lane 0 of a followed by lane 0 of b.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP1 on two lanes: lane 0 of a followed by lane 0 of b.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP1: interleave the low halves of a and b.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP1 on 2-lane vectors: take the low lane of each -> [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP1: interleave the low halves of `a` and `b` -> [a0, b0, a1, b1].
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP1 on 2-lane vectors: take the low lane of each -> [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP1: interleave the low halves of `a` and `b` — indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP1: interleave the low halves (lanes 0..8) of `a` and `b`;
    // indices >= 16 select lanes from `b`.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP1: interleave the low halves of `a` and `b` -> [a0, b0, a1, b1].
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP1: interleave the low halves (lanes 0..4) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP1 on 2-lane vectors: take the low lane of each -> [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP2: interleave the high halves of `a` and `b` -> [a2, b2, a3, b3].
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP2: interleave the high halves (lanes 4..8) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP2 on 2-lane vectors: take the high lane of each -> [a1, b1].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP2: interleave the high halves of `a` and `b` -> [a2, b2, a3, b3].
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP2 on 2-lane vectors: take the high lane of each -> [a1, b1].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP2: interleave the high halves (lanes 4..8) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP2: interleave the high halves (lanes 8..16) of `a` and `b`;
    // indices >= 16 select lanes from `b`.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP2: interleave the high halves of `a` and `b` -> [a2, b2, a3, b3].
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP2: interleave the high halves (lanes 4..8) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP2 on 2-lane vectors: take the high lane of each -> [a1, b1].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP2: interleave the high halves of `a` and `b` -> [a2, b2, a3, b3].
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP2 on 2-lane vectors: take the high lane of each -> [a1, b1].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP2: interleave the high halves (lanes 4..8) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP2: interleave the high halves (lanes 8..16) of `a` and `b`;
    // indices >= 16 select lanes from `b`.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP2: interleave the high halves of `a` and `b` -> [a2, b2, a3, b3].
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP2: interleave the high halves (lanes 4..8) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP2 on 2-lane vectors: take the high lane of each -> [a1, b1].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP2: interleave the high halves of `a` and `b` -> [a2, b2, a3, b3].
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP2 on 2-lane vectors: take the high lane of each -> [a1, b1].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP2: interleave the high halves (lanes 4..8) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP2: interleave the high halves (lanes 8..16) of `a` and `b`;
    // indices >= 16 select lanes from `b`.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP2: interleave the high halves of `a` and `b` -> [a2, b2, a3, b3].
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP2: interleave the high halves (lanes 4..8) of `a` and `b`;
    // indices >= 8 select lanes from `b`.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP2 on 2-lane vectors: take the high lane of each -> [a1, b1].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}