1#![allow(non_camel_case_types)]
7#![allow(unused_imports)]
8
9use crate::{core_arch::simd, intrinsics::simd::*, marker::Sized, mem, ptr};
10
11#[cfg(test)]
12use stdarch_test::assert_instr;
13
types! {
    #![stable(feature = "wasm_simd", since = "1.54.0")]

    /// WASM-specific 128-bit wide SIMD vector type.
    ///
    /// The type is opaque: it is declared here as `4 x i32` purely for layout,
    /// and the lane interpretation (i8x16, f64x2, ...) is chosen by whichever
    /// intrinsic consumes the value.
    pub struct v128(4 x i32);
}
40
// Generates zero-cost bit-reinterpretation helpers in both directions:
// a `v128::as_*` accessor for each concrete lane type, and a `.v128()`
// constructor on each lane type to wrap it back up.
macro_rules! conversions {
    ($(($name:ident = $ty:ty))*) => {
        impl v128 {
            $(
                // View these 128 bits as `$ty` (no data movement).
                #[inline(always)]
                pub(crate) fn $name(self) -> $ty {
                    // SAFETY: `v128` and `$ty` are both plain 128-bit
                    // vectors, so transmute is a bit-for-bit copy.
                    unsafe { mem::transmute(self) }
                }
            )*
        }
        $(
            impl $ty {
                // Wrap a typed vector back into the opaque `v128`.
                #[inline(always)]
                pub(crate) const fn v128(self) -> v128 {
                    // SAFETY: identical size and layout as above.
                    unsafe { mem::transmute(self) }
                }
            }
        )*
    }
}
61
// Instantiate the reinterpretation helpers for every lane view of `v128`.
conversions! {
    (as_u8x16 = simd::u8x16)
    (as_u16x8 = simd::u16x8)
    (as_u32x4 = simd::u32x4)
    (as_u64x2 = simd::u64x2)
    (as_i8x16 = simd::i8x16)
    (as_i16x8 = simd::i16x8)
    (as_i32x4 = simd::i32x4)
    (as_i64x2 = simd::i64x2)
    (as_f32x4 = simd::f32x4)
    (as_f64x2 = simd::f64x2)
}
74
// Declarations of the LLVM intrinsics backing the wasm SIMD instructions
// that have no portable `simd_*` equivalent. Grouped by lane width.
#[allow(improper_ctypes)]
unsafe extern "unadjusted" {
    #[link_name = "llvm.wasm.swizzle"]
    fn llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;

    // 8-bit lane operations.
    #[link_name = "llvm.wasm.bitselect.v16i8"]
    fn llvm_bitselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x16;
    #[link_name = "llvm.wasm.anytrue.v16i8"]
    fn llvm_any_true_i8x16(x: simd::i8x16) -> i32;

    #[link_name = "llvm.wasm.alltrue.v16i8"]
    fn llvm_i8x16_all_true(x: simd::i8x16) -> i32;
    #[link_name = "llvm.wasm.bitmask.v16i8"]
    fn llvm_bitmask_i8x16(a: simd::i8x16) -> i32;
    #[link_name = "llvm.wasm.narrow.signed.v16i8.v8i16"]
    fn llvm_narrow_i8x16_s(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
    #[link_name = "llvm.wasm.narrow.unsigned.v16i8.v8i16"]
    fn llvm_narrow_i8x16_u(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
    #[link_name = "llvm.wasm.avgr.unsigned.v16i8"]
    fn llvm_avgr_u_i8x16(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;

    // 16-bit lane operations.
    #[link_name = "llvm.wasm.extadd.pairwise.signed.v8i16"]
    fn llvm_i16x8_extadd_pairwise_i8x16_s(x: simd::i8x16) -> simd::i16x8;
    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v8i16"]
    fn llvm_i16x8_extadd_pairwise_i8x16_u(x: simd::i8x16) -> simd::i16x8;
    #[link_name = "llvm.wasm.q15mulr.sat.signed"]
    fn llvm_q15mulr(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
    #[link_name = "llvm.wasm.alltrue.v8i16"]
    fn llvm_i16x8_all_true(x: simd::i16x8) -> i32;
    #[link_name = "llvm.wasm.bitmask.v8i16"]
    fn llvm_bitmask_i16x8(a: simd::i16x8) -> i32;
    #[link_name = "llvm.wasm.narrow.signed.v8i16.v4i32"]
    fn llvm_narrow_i16x8_s(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
    #[link_name = "llvm.wasm.narrow.unsigned.v8i16.v4i32"]
    fn llvm_narrow_i16x8_u(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
    #[link_name = "llvm.wasm.avgr.unsigned.v8i16"]
    fn llvm_avgr_u_i16x8(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;

    // 32-bit lane operations.
    #[link_name = "llvm.wasm.extadd.pairwise.signed.v4i32"]
    fn llvm_i32x4_extadd_pairwise_i16x8_s(x: simd::i16x8) -> simd::i32x4;
    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v4i32"]
    fn llvm_i32x4_extadd_pairwise_i16x8_u(x: simd::i16x8) -> simd::i32x4;
    #[link_name = "llvm.wasm.alltrue.v4i32"]
    fn llvm_i32x4_all_true(x: simd::i32x4) -> i32;
    #[link_name = "llvm.wasm.bitmask.v4i32"]
    fn llvm_bitmask_i32x4(a: simd::i32x4) -> i32;
    #[link_name = "llvm.wasm.dot"]
    fn llvm_i32x4_dot_i16x8_s(a: simd::i16x8, b: simd::i16x8) -> simd::i32x4;

    // 64-bit lane operations.
    #[link_name = "llvm.wasm.alltrue.v2i64"]
    fn llvm_i64x2_all_true(x: simd::i64x2) -> i32;
    #[link_name = "llvm.wasm.bitmask.v2i64"]
    fn llvm_bitmask_i64x2(a: simd::i64x2) -> i32;

    // f32x4 operations mapped onto generic LLVM float intrinsics.
    #[link_name = "llvm.nearbyint.v4f32"]
    fn llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4;
    #[link_name = "llvm.minimum.v4f32"]
    fn llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
    #[link_name = "llvm.maximum.v4f32"]
    fn llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;

    // f64x2 operations mapped onto generic LLVM float intrinsics.
    #[link_name = "llvm.nearbyint.v2f64"]
    fn llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2;
    #[link_name = "llvm.minimum.v2f64"]
    fn llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
    #[link_name = "llvm.maximum.v2f64"]
    fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
}
143
/// Loads a `v128` vector from the given heap address.
///
/// # Safety
///
/// `m` must point to 16 readable bytes; no alignment is required
/// (the read is performed with `read_unaligned`).
#[inline]
#[cfg_attr(test, assert_instr(v128.load))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load(m: *const v128) -> v128 {
    m.read_unaligned()
}
174
/// Load eight 8-bit integers and sign-extend each one to a 16-bit lane.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
    // Unaligned 64-bit read of the 8 narrow lanes, then lane-wise extension.
    let m = m.cast::<simd::i8x8>().read_unaligned();
    simd_cast::<_, simd::i16x8>(m).v128()
}

/// Load eight 8-bit integers and zero-extend each one to a 16-bit lane.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
    let m = m.cast::<simd::u8x8>().read_unaligned();
    simd_cast::<_, simd::u16x8>(m).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;

/// Load four 16-bit integers and sign-extend each one to a 32-bit lane.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
    let m = m.cast::<simd::i16x4>().read_unaligned();
    simd_cast::<_, simd::i32x4>(m).v128()
}

/// Load four 16-bit integers and zero-extend each one to a 32-bit lane.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
    let m = m.cast::<simd::u16x4>().read_unaligned();
    simd_cast::<_, simd::u32x4>(m).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;

/// Load two 32-bit integers and sign-extend each one to a 64-bit lane.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
    let m = m.cast::<simd::i32x2>().read_unaligned();
    simd_cast::<_, simd::i64x2>(m).v128()
}

/// Load two 32-bit integers and zero-extend each one to a 64-bit lane.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
    let m = m.cast::<simd::u32x2>().read_unaligned();
    simd_cast::<_, simd::u64x2>(m).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
291
/// Load a single byte and splat it to all 16 lanes of a `v128`.
///
/// # Safety
///
/// `m` must point to 1 readable byte (a `u8` read can never be misaligned).
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
    u8x16_splat(*m)
}

/// Load a single 16-bit value and splat it to all 8 lanes of a `v128`.
///
/// # Safety
///
/// `m` must point to 2 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
    u16x8_splat(ptr::read_unaligned(m))
}

/// Load a single 32-bit value and splat it to all 4 lanes of a `v128`.
///
/// # Safety
///
/// `m` must point to 4 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
    u32x4_splat(ptr::read_unaligned(m))
}

/// Load a single 64-bit value and splat it to both lanes of a `v128`.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
    u64x2_splat(ptr::read_unaligned(m))
}

/// Load a 32-bit value into lane 0 of a `v128`, zeroing the other lanes.
///
/// # Safety
///
/// `m` must point to 4 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
    u32x4(ptr::read_unaligned(m), 0, 0, 0)
}

/// Load a 64-bit value into lane 0 of a `v128`, zeroing the other lane.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
    u64x2_replace_lane::<0>(u64x2(0, 0), ptr::read_unaligned(m))
}
417
/// Stores a `v128` vector to the given heap address.
///
/// # Safety
///
/// `m` must point to 16 writable bytes; no alignment is required
/// (the write is performed with `write_unaligned`).
#[inline]
#[cfg_attr(test, assert_instr(v128.store))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store(m: *mut v128, a: v128) {
    m.write_unaligned(a)
}
448
/// Load a single byte from `m` into lane `L` of `v`, leaving other lanes intact.
///
/// `L` is checked at compile time (via the delegated `u8x16_replace_lane`).
///
/// # Safety
///
/// `m` must point to 1 readable byte.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
    u8x16_replace_lane::<L>(v, *m)
}

/// Load a 16-bit value from `m` into lane `L` of `v`, leaving other lanes intact.
///
/// # Safety
///
/// `m` must point to 2 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
    u16x8_replace_lane::<L>(v, ptr::read_unaligned(m))
}

/// Load a 32-bit value from `m` into lane `L` of `v`, leaving other lanes intact.
///
/// # Safety
///
/// `m` must point to 4 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
    u32x4_replace_lane::<L>(v, ptr::read_unaligned(m))
}

/// Load a 64-bit value from `m` into lane `L` of `v`, leaving the other lane intact.
///
/// # Safety
///
/// `m` must point to 8 readable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
    u64x2_replace_lane::<L>(v, ptr::read_unaligned(m))
}
528
/// Store lane `L` of the 16 byte lanes of `v` to the single byte at `m`.
///
/// # Safety
///
/// `m` must point to 1 writable byte.
#[inline]
#[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
    *m = u8x16_extract_lane::<L>(v);
}

/// Store lane `L` of the 8 16-bit lanes of `v` to `m`.
///
/// # Safety
///
/// `m` must point to 2 writable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
    ptr::write_unaligned(m, u16x8_extract_lane::<L>(v))
}

/// Store lane `L` of the 4 32-bit lanes of `v` to `m`.
///
/// # Safety
///
/// `m` must point to 4 writable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
    ptr::write_unaligned(m, u32x4_extract_lane::<L>(v))
}

/// Store lane `L` of the 2 64-bit lanes of `v` to `m`.
///
/// # Safety
///
/// `m` must point to 8 writable bytes; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
    ptr::write_unaligned(m, u64x2_extract_lane::<L>(v))
}
608
/// Materializes a SIMD value from the provided sixteen `i8` lane values.
///
/// Usable in `const` contexts; compiles to a single `v128.const` when the
/// operands are constant.
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
        a8 = 8,
        a9 = 9,
        a10 = 10,
        a11 = 11,
        a12 = 12,
        a13 = 13,
        a14 = 14,
        a15 = 15,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i8x16(
    a0: i8,
    a1: i8,
    a2: i8,
    a3: i8,
    a4: i8,
    a5: i8,
    a6: i8,
    a7: i8,
    a8: i8,
    a9: i8,
    a10: i8,
    a11: i8,
    a12: i8,
    a13: i8,
    a14: i8,
    a15: i8,
) -> v128 {
    simd::i8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
663
/// Materializes a SIMD value from the provided sixteen `u8` lane values.
///
/// Usable in `const` contexts; same instruction as [`i8x16`], just with an
/// unsigned lane view.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u8x16(
    a0: u8,
    a1: u8,
    a2: u8,
    a3: u8,
    a4: u8,
    a5: u8,
    a6: u8,
    a7: u8,
    a8: u8,
    a9: u8,
    a10: u8,
    a11: u8,
    a12: u8,
    a13: u8,
    a14: u8,
    a15: u8,
) -> v128 {
    simd::u8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
696
/// Materializes a SIMD value from the provided eight `i16` lane values.
///
/// Usable in `const` contexts; compiles to a single `v128.const` when the
/// operands are constant.
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 {
    simd::i16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}

/// Materializes a SIMD value from the provided eight `u16` lane values.
///
/// Usable in `const` contexts; unsigned counterpart of [`i16x8`].
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 {
    simd::u16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
736
/// Materializes a SIMD value from the provided four `i32` lane values.
///
/// Usable in `const` contexts.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 {
    simd::i32x4::new(a0, a1, a2, a3).v128()
}

/// Materializes a SIMD value from the provided four `u32` lane values.
///
/// Usable in `const` contexts; unsigned counterpart of [`i32x4`].
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 {
    simd::u32x4::new(a0, a1, a2, a3).v128()
}

/// Materializes a SIMD value from the provided two `i64` lane values.
///
/// Usable in `const` contexts.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i64x2(a0: i64, a1: i64) -> v128 {
    simd::i64x2::new(a0, a1).v128()
}

/// Materializes a SIMD value from the provided two `u64` lane values.
///
/// Usable in `const` contexts; unsigned counterpart of [`i64x2`].
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u64x2(a0: u64, a1: u64) -> v128 {
    simd::u64x2::new(a0, a1).v128()
}

/// Materializes a SIMD value from the provided four `f32` lane values.
///
/// Usable in `const` contexts (const-stabilized later than the integer
/// constructors, hence the separate feature name).
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
    simd::f32x4::new(a0, a1, a2, a3).v128()
}

/// Materializes a SIMD value from the provided two `f64` lane values.
///
/// Usable in `const` contexts.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f64x2(a0: f64, a1: f64) -> v128 {
    simd::f64x2::new(a0, a1).v128()
}
818
/// Returns a new vector with byte lanes selected from the 32 concatenated
/// byte lanes of `a` (indices 0..16) and `b` (indices 16..32).
///
/// All indices are compile-time constants and are validated with
/// `static_assert!` to be below 32.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
        I8 = 16,
        I9 = 18,
        I10 = 20,
        I11 = 22,
        I12 = 24,
        I13 = 26,
        I14 = 28,
        I15 = 30,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
    const I8: usize,
    const I9: usize,
    const I10: usize,
    const I11: usize,
    const I12: usize,
    const I13: usize,
    const I14: usize,
    const I15: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    // Each index picks from the 32-lane concatenation of `a` then `b`.
    static_assert!(I0 < 32);
    static_assert!(I1 < 32);
    static_assert!(I2 < 32);
    static_assert!(I3 < 32);
    static_assert!(I4 < 32);
    static_assert!(I5 < 32);
    static_assert!(I6 < 32);
    static_assert!(I7 < 32);
    static_assert!(I8 < 32);
    static_assert!(I9 < 32);
    static_assert!(I10 < 32);
    static_assert!(I11 < 32);
    static_assert!(I12 < 32);
    static_assert!(I13 < 32);
    static_assert!(I14 < 32);
    static_assert!(I15 < 32);
    let shuf: simd::u8x16 = unsafe {
        simd_shuffle!(
            a.as_u8x16(),
            b.as_u8x16(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32, I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32,
                I14 as u32, I15 as u32,
            ],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shuffle as u8x16_shuffle;
911
/// Returns a new vector with 16-bit lanes selected from the 16 concatenated
/// lanes of `a` (indices 0..8) and `b` (indices 8..16).
///
/// Lowers to the generic `i8x16.shuffle` instruction; indices are validated
/// at compile time to be below 16.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 16);
    static_assert!(I1 < 16);
    static_assert!(I2 < 16);
    static_assert!(I3 < 16);
    static_assert!(I4 < 16);
    static_assert!(I5 < 16);
    static_assert!(I6 < 16);
    static_assert!(I7 < 16);
    let shuf: simd::u16x8 = unsafe {
        simd_shuffle!(
            a.as_u16x8(),
            b.as_u16x8(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32,
            ],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_shuffle as u16x8_shuffle;
972
/// Returns a new vector with 32-bit lanes selected from the 8 concatenated
/// lanes of `a` (indices 0..4) and `b` (indices 4..8).
///
/// Lowers to the generic `i8x16.shuffle` instruction; indices are validated
/// at compile time to be below 8.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 8);
    static_assert!(I1 < 8);
    static_assert!(I2 < 8);
    static_assert!(I3 < 8);
    let shuf: simd::u32x4 = unsafe {
        simd_shuffle!(
            a.as_u32x4(),
            b.as_u32x4(),
            [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_shuffle as u32x4_shuffle;
1005
/// Returns a new vector with 64-bit lanes selected from the 4 concatenated
/// lanes of `a` (indices 0..2) and `b` (indices 2..4).
///
/// Lowers to the generic `i8x16.shuffle` instruction; indices are validated
/// at compile time to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
    static_assert!(I0 < 4);
    static_assert!(I1 < 4);
    let shuf: simd::u64x2 =
        unsafe { simd_shuffle!(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]) };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_shuffle as u64x2_shuffle;
1028
/// Extracts lane `N` of `a` interpreted as 16 signed 8-bit lanes.
///
/// `N` is validated at compile time to be below 16.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
    static_assert!(N < 16);
    // SAFETY: `N < 16` guaranteed by the assert above.
    unsafe { simd_extract!(a.as_i8x16(), N as u32) }
}

/// Extracts lane `N` of `a` interpreted as 16 unsigned 8-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
    static_assert!(N < 16);
    unsafe { simd_extract!(a.as_u8x16(), N as u32) }
}

/// Returns `a` with 8-bit lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
    static_assert!(N < 16);
    unsafe { simd_insert!(a.as_i8x16(), N as u32, val).v128() }
}

/// Returns `a` with unsigned 8-bit lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
    static_assert!(N < 16);
    unsafe { simd_insert!(a.as_u8x16(), N as u32, val).v128() }
}

/// Extracts lane `N` of `a` interpreted as 8 signed 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
    static_assert!(N < 8);
    unsafe { simd_extract!(a.as_i16x8(), N as u32) }
}

/// Extracts lane `N` of `a` interpreted as 8 unsigned 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
    static_assert!(N < 8);
    unsafe { simd_extract!(a.as_u16x8(), N as u32) }
}

/// Returns `a` with 16-bit lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
    static_assert!(N < 8);
    unsafe { simd_insert!(a.as_i16x8(), N as u32, val).v128() }
}

/// Returns `a` with unsigned 16-bit lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
    static_assert!(N < 8);
    unsafe { simd_insert!(a.as_u16x8(), N as u32, val).v128() }
}
1140
/// Extracts lane `N` of `a` interpreted as 4 32-bit lanes.
///
/// `N` is validated at compile time to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
    static_assert!(N < 4);
    unsafe { simd_extract!(a.as_i32x4(), N as u32) }
}

/// Unsigned view of [`i32x4_extract_lane`] (same bits, reinterpreted).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
    i32x4_extract_lane::<N>(a) as u32
}

/// Returns `a` with 32-bit lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
    static_assert!(N < 4);
    unsafe { simd_insert!(a.as_i32x4(), N as u32, val).v128() }
}

/// Unsigned view of [`i32x4_replace_lane`] (same bits, reinterpreted).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
    i32x4_replace_lane::<N>(a, val as i32)
}

/// Extracts lane `N` of `a` interpreted as 2 64-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
    static_assert!(N < 2);
    unsafe { simd_extract!(a.as_i64x2(), N as u32) }
}

/// Unsigned view of [`i64x2_extract_lane`] (same bits, reinterpreted).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
    i64x2_extract_lane::<N>(a) as u64
}

/// Returns `a` with 64-bit lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
    static_assert!(N < 2);
    unsafe { simd_insert!(a.as_i64x2(), N as u32, val).v128() }
}

/// Unsigned view of [`i64x2_replace_lane`] (same bits, reinterpreted).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
    i64x2_replace_lane::<N>(a, val as i64)
}
1244
/// Extracts lane `N` of `a` interpreted as 4 `f32` lanes.
///
/// `N` is validated at compile time to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
    static_assert!(N < 4);
    unsafe { simd_extract!(a.as_f32x4(), N as u32) }
}

/// Returns `a` with `f32` lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
    static_assert!(N < 4);
    unsafe { simd_insert!(a.as_f32x4(), N as u32, val).v128() }
}

/// Extracts lane `N` of `a` interpreted as 2 `f64` lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
    static_assert!(N < 2);
    unsafe { simd_extract!(a.as_f64x2(), N as u32) }
}

/// Returns `a` with `f64` lane `N` replaced by `val`.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
    static_assert!(N < 2);
    unsafe { simd_insert!(a.as_f64x2(), N as u32, val).v128() }
}
1300
/// Selects byte lanes of `a` using the byte indices in `s`.
///
/// Delegates to the dedicated LLVM swizzle intrinsic, which implements the
/// wasm semantics for out-of-range indices (a portable `simd_shuffle` could
/// not, since its indices must be constants).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.swizzle))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.swizzle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_swizzle(a: v128, s: v128) -> v128 {
    // SAFETY: plain value-in/value-out LLVM intrinsic call.
    unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_swizzle as u8x16_swizzle;
1317
/// Creates a vector with `a` replicated to all 16 `i8` lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_splat(a: i8) -> v128 {
    simd::i8x16::splat(a).v128()
}

/// Creates a vector with `a` replicated to all 16 `u8` lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_splat(a: u8) -> v128 {
    simd::u8x16::splat(a).v128()
}

/// Creates a vector with `a` replicated to all 8 `i16` lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_splat(a: i16) -> v128 {
    simd::i16x8::splat(a).v128()
}

/// Creates a vector with `a` replicated to all 8 `u16` lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_splat(a: u16) -> v128 {
    simd::u16x8::splat(a).v128()
}

/// Creates a vector with `a` replicated to all 4 `i32` lanes.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_splat(a: i32) -> v128 {
    simd::i32x4::splat(a).v128()
}

/// Creates a vector with `a` replicated to all 4 `u32` lanes.
///
/// Delegates to [`i32x4_splat`] (same bits, same instruction).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_splat(a: u32) -> v128 {
    i32x4_splat(a as i32)
}

/// Creates a vector with `a` replicated to both `i64` lanes.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_splat(a: i64) -> v128 {
    simd::i64x2::splat(a).v128()
}
1400
1401#[inline]
1405#[target_feature(enable = "simd128")]
1406#[doc(alias("u64x2.splat"))]
1407#[stable(feature = "wasm_simd", since = "1.54.0")]
1408pub fn u64x2_splat(a: u64) -> v128 {
1409 i64x2_splat(a as i64)
1410}
1411
/// Creates a vector with `a` replicated to all 4 `f32` lanes.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_splat(a: f32) -> v128 {
    simd::f32x4::splat(a).v128()
}

/// Creates a vector with `a` replicated to both `f64` lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_splat(a: f64) -> v128 {
    simd::f64x2::splat(a).v128()
}
1435
/// Lane-wise equality of the 16 byte lanes: each result lane is all ones
/// if the corresponding lanes are equal, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise inequality of the 16 byte lanes: each result lane is all ones
/// if the corresponding lanes differ, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Equality comparisons are sign-agnostic, so the unsigned names re-export
// the signed implementations.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_eq as u8x16_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_ne as u8x16_ne;
1468
/// Lane-wise signed less-than of the 16 byte lanes; each result lane is
/// all ones when the comparison holds, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise unsigned less-than of the 16 byte lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_lt(a: v128, b: v128) -> v128 {
    // Unsigned operands select the `lt_u` instruction; the mask result is
    // still expressed as `i8x16` (all-ones / all-zeros lanes).
    unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise signed greater-than of the 16 byte lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise unsigned greater-than of the 16 byte lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise signed less-than-or-equal of the 16 byte lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise unsigned less-than-or-equal of the 16 byte lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}
1552
1553#[inline]
1559#[cfg_attr(test, assert_instr(i8x16.ge_s))]
1560#[target_feature(enable = "simd128")]
1561#[doc(alias("i8x16.ge_s"))]
1562#[stable(feature = "wasm_simd", since = "1.54.0")]
1563pub fn i8x16_ge(a: v128, b: v128) -> v128 {
1564 unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1565}
1566
1567#[inline]
1573#[cfg_attr(test, assert_instr(i8x16.ge_u))]
1574#[target_feature(enable = "simd128")]
1575#[doc(alias("i8x16.ge_u"))]
1576#[stable(feature = "wasm_simd", since = "1.54.0")]
1577pub fn u8x16_ge(a: v128, b: v128) -> v128 {
1578 unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1579}
1580
/// Lane-wise `==` on eight 16-bit lanes; equal lanes become all ones, other
/// lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise `!=` on eight 16-bit lanes; unequal lanes become all ones, other
/// lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

// (In)equality is sign-agnostic, so the unsigned names reuse the signed
// implementations.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_eq as u16x8_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_ne as u16x8_ne;
1613
/// Lane-wise signed `<` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned `<` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise signed `>` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned `>` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise signed `<=` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned `<=` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise signed `>=` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned `>=` on eight 16-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}
1725
/// Lane-wise `==` on four 32-bit lanes; equal lanes become all ones, other
/// lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise `!=` on four 32-bit lanes; unequal lanes become all ones, other
/// lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

// (In)equality is sign-agnostic, so the unsigned names reuse the signed
// implementations.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_eq as u32x4_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_ne as u32x4_ne;
1758
/// Lane-wise signed `<` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned `<` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise signed `>` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned `>` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise signed `<=` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned `<=` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise signed `>=` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned `>=` on four 32-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}
1870
/// Lane-wise `==` on two 64-bit lanes; equal lanes become all ones, other
/// lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise `!=` on two 64-bit lanes; unequal lanes become all ones, other
/// lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

// (In)equality is sign-agnostic, so the unsigned names reuse the signed
// implementations.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_eq as u64x2_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_ne as u64x2_ne;
1903
// Note: wasm only defines *signed* 64-bit lane comparisons, so there are no
// `u64x2_lt`/`gt`/`le`/`ge` counterparts here.

/// Lane-wise signed `<` on two 64-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise signed `>` on two 64-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise signed `<=` on two 64-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise signed `>=` on two 64-bit lanes; true lanes become all ones,
/// false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}
1959
/// Lane-wise floating-point `==` on four 32-bit float lanes; true lanes
/// become all ones (in the `i32x4` result), false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise floating-point `!=` on four 32-bit float lanes; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise floating-point `<` on four 32-bit float lanes; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise floating-point `>` on four 32-bit float lanes; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise floating-point `<=` on four 32-bit float lanes; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise floating-point `>=` on four 32-bit float lanes; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}
2043
/// Lane-wise floating-point `==` on two 64-bit float lanes; true lanes become
/// all ones (in the `i64x2` result), false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise floating-point `!=` on two 64-bit float lanes; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise floating-point `<` on two 64-bit float lanes; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise floating-point `>` on two 64-bit float lanes; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise floating-point `<=` on two 64-bit float lanes; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise floating-point `>=` on two 64-bit float lanes; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}
2127
/// Flips each bit of the 128-bit input vector (XOR with all-ones).
#[inline]
#[cfg_attr(test, assert_instr(v128.not))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.not"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_not(a: v128) -> v128 {
    unsafe { simd_xor(a.as_i64x2(), simd::i64x2::new(!0, !0)).v128() }
}

/// Performs a bitwise AND of the two input 128-bit vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.and))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.and"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_and(a: v128, b: v128) -> v128 {
    unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Bitwise AND of bits of `a` and the bitwise inverse of bits of `b`
/// (`a & !b`).
#[inline]
#[cfg_attr(test, assert_instr(v128.andnot))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.andnot"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_andnot(a: v128, b: v128) -> v128 {
    unsafe {
        simd_and(
            a.as_i64x2(),
            // XOR with all-ones inverts `b`.
            simd_xor(b.as_i64x2(), simd::i64x2::new(-1, -1)),
        )
        .v128()
    }
}

/// Performs a bitwise OR of the two input 128-bit vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.or))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.or"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_or(a: v128, b: v128) -> v128 {
    unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Performs a bitwise XOR of the two input 128-bit vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.xor))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.xor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_xor(a: v128, b: v128) -> v128 {
    unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Uses the bitmask `c` to select bits: where a bit of `c` is 1 the result
/// bit comes from `v1`, where 0 it comes from `v2`.
#[inline]
#[cfg_attr(test, assert_instr(v128.bitselect))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.bitselect"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 {
    unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() }
}

/// Returns `true` if any bit in `a` is set, `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(v128.any_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.any_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_any_true(a: v128) -> bool {
    unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 }
}
2208
/// Lane-wise wrapping absolute value of sixteen 8-bit signed integers
/// (`i8::MIN` maps to itself, since `0 - i8::MIN` wraps).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_abs(a: v128) -> v128 {
    unsafe {
        let a = a.as_i8x16();
        let zero = simd::i8x16::ZERO;
        // Select `0 - a` for negative lanes, `a` otherwise.
        simd_select::<simd::m8x16, simd::i8x16>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Lane-wise wrapping negation of sixteen 8-bit signed integers
/// (implemented as multiplication by -1).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() }
}

/// Counts the number of bits set to one within each 8-bit lane.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.popcnt))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.popcnt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_popcnt(v: v128) -> v128 {
    unsafe { simd_ctpop(v.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_popcnt as u8x16_popcnt;

/// Returns `true` if all sixteen 8-bit lanes are non-zero, `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_all_true(a: v128) -> bool {
    unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_all_true as u8x16_all_true;

/// Extracts the high bit of each of the sixteen 8-bit lanes and concatenates
/// them into a 16-bit scalar mask (lane 0 in bit 0).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_bitmask(a: v128) -> u16 {
    unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_bitmask as u8x16_bitmask;
2272
/// Converts two `i16x8` input vectors into one `i8x16` vector by narrowing
/// each 16-bit lane with *signed* saturation; `a` provides the low eight
/// output lanes and `b` the high eight.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Converts two `i16x8` input vectors into one `u8x16` vector by narrowing
/// each 16-bit lane with *unsigned* saturation; input lanes are interpreted
/// as signed.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Shifts each 8-bit lane left by `amt` bits; only the low 3 bits of `amt`
/// are used (shift amount is taken modulo the lane width).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shl as u8x16_shl;

/// Shifts each 8-bit lane right by `amt & 0x7` bits, sign-extending
/// (arithmetic shift).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
}

/// Shifts each 8-bit lane right by `amt & 0x7` bits, filling with zeros
/// (logical shift).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat((amt & 0x7) as u8)).v128() }
}
2366
/// Lane-wise wrapping addition of sixteen 8-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Wrapping addition is sign-agnostic.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_add as u8x16_add;

/// Lane-wise saturating addition of sixteen 8-bit *signed* lanes
/// (clamped to `i8::MIN..=i8::MAX`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise saturating addition of sixteen 8-bit *unsigned* lanes
/// (clamped to `0..=u8::MAX`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise wrapping subtraction of sixteen 8-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Wrapping subtraction is sign-agnostic.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_sub as u8x16_sub;

/// Lane-wise saturating subtraction of sixteen 8-bit *signed* lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise saturating subtraction of sixteen 8-bit *unsigned* lanes
/// (results clamp at zero).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u8x16(), b.as_u8x16()).v128() }
}
2436
/// Lane-wise minimum of sixteen 8-bit lanes, interpreted as signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Lane-wise minimum of sixteen 8-bit lanes, interpreted as unsigned
/// integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Lane-wise maximum of sixteen 8-bit lanes, interpreted as signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Lane-wise maximum of sixteen 8-bit lanes, interpreted as unsigned
/// integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Lane-wise rounding average of sixteen unsigned 8-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
    unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() }
}
2498
/// Integer extended pairwise addition: adds adjacent pairs of the sixteen
/// 8-bit lanes of `a`, interpreted as signed, producing eight 16-bit results.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
}

/// Integer extended pairwise addition: adds adjacent pairs of the sixteen
/// 8-bit lanes of `a`, interpreted as unsigned, producing eight 16-bit
/// results.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
2523
/// Lane-wise wrapping absolute value of eight 16-bit signed integers
/// (`i16::MIN` maps to itself, since `0 - i16::MIN` wraps).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_abs(a: v128) -> v128 {
    let a = a.as_i16x8();
    let zero = simd::i16x8::ZERO;
    unsafe {
        // Select `0 - a` for negative lanes, `a` otherwise.
        simd_select::<simd::m16x8, simd::i16x8>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Lane-wise wrapping negation of eight 16-bit signed integers
/// (implemented as multiplication by -1).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
}

/// Lane-wise saturating, rounding multiplication of eight signed 16-bit
/// lanes in Q15 fixed-point format.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.q15mulr_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
    unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
}
2557
/// Returns `true` if all eight 16-bit lanes are non-zero, `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_all_true(a: v128) -> bool {
    unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_all_true as u16x8_all_true;

/// Extracts the high bit of each of the eight 16-bit lanes and concatenates
/// them into an 8-bit scalar mask (lane 0 in bit 0).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_bitmask as u16x8_bitmask;

/// Converts two `i32x4` input vectors into one `i16x8` vector by narrowing
/// each 32-bit lane with *signed* saturation; `a` provides the low four
/// output lanes and `b` the high four.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Converts two `i32x4` input vectors into one `u16x8` vector by narrowing
/// each 32-bit lane with *unsigned* saturation; input lanes are interpreted
/// as signed.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() }
}
2612
/// Sign-extends the low eight 8-bit lanes of `a` to eight 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
    unsafe {
        // Shuffle out the low half, then widen with a sign-extending cast.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}

/// Sign-extends the high eight 8-bit lanes of `a` to eight 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
    unsafe {
        // Shuffle out the high half, then widen with a sign-extending cast.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}

/// Zero-extends the low eight 8-bit lanes of `a` to eight 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
    unsafe {
        // Unsigned element type makes the widening cast zero-extend.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;

/// Zero-extends the high eight 8-bit lanes of `a` to eight 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
    unsafe {
        // Unsigned element type makes the widening cast zero-extend.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
2690
/// Shifts each 16-bit lane left by `amt` bits; only the low 4 bits of `amt`
/// are used (shift amount is taken modulo the lane width).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_shl as u16x8_shl;

/// Shifts each 16-bit lane right by `amt & 0xf` bits, sign-extending
/// (arithmetic shift).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
}

/// Shifts each 16-bit lane right by `amt & 0xf` bits, filling with zeros
/// (logical shift).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat((amt & 0xf) as u16)).v128() }
}
2740
/// Lane-wise wrapping addition of eight 16-bit integer lanes (wasm
/// `i16x8.add`; the same operation serves signed and unsigned lanes).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
}
2750
2751#[stable(feature = "wasm_simd", since = "1.54.0")]
2752pub use i16x8_add as u16x8_add;
2753
/// Lane-wise signed saturating addition of eight 16-bit lanes (wasm
/// `i16x8.add_sat_s`); results clamp to the `i16` range instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i16x8(), b.as_i16x8()).v128() }
}
2764
/// Lane-wise unsigned saturating addition of eight 16-bit lanes (wasm
/// `i16x8.add_sat_u`); results clamp to the `u16` range instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u16x8(), b.as_u16x8()).v128() }
}
2775
/// Lane-wise wrapping subtraction of eight 16-bit integer lanes (wasm
/// `i16x8.sub`; the same operation serves signed and unsigned lanes).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}
2785
2786#[stable(feature = "wasm_simd", since = "1.54.0")]
2787pub use i16x8_sub as u16x8_sub;
2788
/// Lane-wise signed saturating subtraction of eight 16-bit lanes (wasm
/// `i16x8.sub_sat_s`); results clamp to the `i16` range instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}
2799
/// Lane-wise unsigned saturating subtraction of eight 16-bit lanes (wasm
/// `i16x8.sub_sat_u`); results clamp at zero instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u16x8(), b.as_u16x8()).v128() }
}
2810
/// Lane-wise wrapping multiplication of eight 16-bit integer lanes (wasm
/// `i16x8.mul`; the low 16 bits of each product are kept).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
}
2821
2822#[stable(feature = "wasm_simd", since = "1.54.0")]
2823pub use i16x8_mul as u16x8_mul;
2824
/// Lane-wise signed minimum of eight 16-bit lanes (wasm `i16x8.min_s`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2837
/// Lane-wise unsigned minimum of eight 16-bit lanes (wasm `i16x8.min_u`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2850
/// Lane-wise signed maximum of eight 16-bit lanes (wasm `i16x8.max_s`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2863
/// Lane-wise unsigned maximum of eight 16-bit lanes (wasm `i16x8.max_u`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2876
/// Lane-wise unsigned rounding average of eight 16-bit lanes (wasm
/// `i16x8.avgr_u`), delegated to the matching LLVM intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
    // The LLVM intrinsic is declared over i16x8; the bit pattern is what matters.
    unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
}
2886
/// Sign-extends the low eight 8-bit lanes of each operand to 16 bits and
/// multiplies them lane-wise (wasm `i16x8.extmul_low_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the low half of each input, then multiply; no overflow is
        // possible since i8 * i8 fits in i16.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2911
/// Sign-extends the high eight 8-bit lanes of each operand to 16 bits and
/// multiplies them lane-wise (wasm `i16x8.extmul_high_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the high half of each input, then multiply; no overflow is
        // possible since i8 * i8 fits in i16.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2936
/// Zero-extends the low eight 8-bit lanes of each operand to 16 bits and
/// multiplies them lane-wise (wasm `i16x8.extmul_low_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the low half of each input, then multiply; no overflow is
        // possible since u8 * u8 fits in u16.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2961
2962#[stable(feature = "wasm_simd", since = "1.54.0")]
2963pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
2964
/// Zero-extends the high eight 8-bit lanes of each operand to 16 bits and
/// multiplies them lane-wise (wasm `i16x8.extmul_high_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the high half of each input, then multiply; no overflow is
        // possible since u8 * u8 fits in u16.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2989
2990#[stable(feature = "wasm_simd", since = "1.54.0")]
2991pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
2992
/// Sign-extends adjacent pairs of 16-bit lanes to 32 bits and adds each
/// pair (wasm `i32x4.extadd_pairwise_i16x8_s`), via the LLVM intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
}
3003
/// Zero-extends adjacent pairs of 16-bit lanes to 32 bits and adds each
/// pair (wasm `i32x4.extadd_pairwise_i16x8_u`), via the LLVM intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))]
#[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
#[target_feature(enable = "simd128")]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
    // The LLVM intrinsic is declared over i16x8; the bit pattern is what matters.
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
}
3014
3015#[stable(feature = "wasm_simd", since = "1.54.0")]
3016pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
3017
/// Lane-wise absolute value of four signed 32-bit lanes (wasm `i32x4.abs`),
/// written as select(a < 0, 0 - a, a). `i32::MIN` wraps to itself because
/// `simd_sub` is two's-complement wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_abs(a: v128) -> v128 {
    let a = a.as_i32x4();
    let zero = simd::i32x4::ZERO;
    unsafe {
        simd_select::<simd::m32x4, simd::i32x4>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3031
/// Lane-wise wrapping negation of four 32-bit lanes (wasm `i32x4.neg`),
/// implemented as a multiply by -1 so LLVM selects the `neg` instruction.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
}
3041
/// Returns `true` if every 32-bit lane is nonzero (wasm `i32x4.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_all_true(a: v128) -> bool {
    unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
}
3051
3052#[stable(feature = "wasm_simd", since = "1.54.0")]
3053pub use i32x4_all_true as u32x4_all_true;
3054
/// Gathers the sign bit of each 32-bit lane into the low 4 bits of the
/// result (wasm `i32x4.bitmask`); the upper bits of the `u8` are zero.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
}
3065
3066#[stable(feature = "wasm_simd", since = "1.54.0")]
3067pub use i32x4_bitmask as u32x4_bitmask;
3068
/// Sign-extends the low four 16-bit lanes into four 32-bit lanes (wasm
/// `i32x4.extend_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..4 (the low half), then widen each i16 to i32.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
3086
/// Sign-extends the high four 16-bit lanes into four 32-bit lanes (wasm
/// `i32x4.extend_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 4..8 (the high half), then widen each i16 to i32.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}
3104
/// Zero-extends the low four 16-bit lanes into four 32-bit lanes (wasm
/// `i32x4.extend_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..4 (the low half), then widen each u16 to u32.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
3122
3123#[stable(feature = "wasm_simd", since = "1.54.0")]
3124pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
3125
/// Zero-extends the high four 16-bit lanes into four 32-bit lanes (wasm
/// `i32x4.extend_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 4..8 (the high half), then widen each u16 to u32.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}
3143
3144#[stable(feature = "wasm_simd", since = "1.54.0")]
3145pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
3146
/// Shifts each 32-bit lane left by `amt` bits (wasm `i32x4.shl`).
/// Only `amt % 32` is used (the `& 0x1f` mask), matching the wasm spec.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
}
3161
3162#[stable(feature = "wasm_simd", since = "1.54.0")]
3163pub use i32x4_shl as u32x4_shl;
3164
/// Arithmetically (sign-extending) shifts each 32-bit lane right by `amt`
/// bits (wasm `i32x4.shr_s`). Only `amt % 32` is used.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
}
3180
/// Logically (zero-filling) shifts each 32-bit lane right by `amt` bits
/// (wasm `i32x4.shr_u`). Only `amt % 32` is used.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt & 0x1f)).v128() }
}
3196
/// Lane-wise wrapping addition of four 32-bit integer lanes (wasm
/// `i32x4.add`; the same operation serves signed and unsigned lanes).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
}
3206
3207#[stable(feature = "wasm_simd", since = "1.54.0")]
3208pub use i32x4_add as u32x4_add;
3209
/// Lane-wise wrapping subtraction of four 32-bit integer lanes (wasm
/// `i32x4.sub`; the same operation serves signed and unsigned lanes).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
}
3219
3220#[stable(feature = "wasm_simd", since = "1.54.0")]
3221pub use i32x4_sub as u32x4_sub;
3222
/// Lane-wise wrapping multiplication of four 32-bit integer lanes (wasm
/// `i32x4.mul`; the low 32 bits of each product are kept).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
}
3233
3234#[stable(feature = "wasm_simd", since = "1.54.0")]
3235pub use i32x4_mul as u32x4_mul;
3236
/// Lane-wise signed minimum of four 32-bit lanes (wasm `i32x4.min_s`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3249
/// Lane-wise unsigned minimum of four 32-bit lanes (wasm `i32x4.min_u`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3262
/// Lane-wise signed maximum of four 32-bit lanes (wasm `i32x4.max_s`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3275
/// Lane-wise unsigned maximum of four 32-bit lanes (wasm `i32x4.max_u`),
/// expressed as a compare-and-select so LLVM pattern-matches the instruction.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3288
/// Signed dot product of adjacent 16-bit lane pairs, producing four 32-bit
/// lanes (wasm `i32x4.dot_i16x8_s`), via the LLVM intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.dot_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
}
3299
/// Sign-extends the low four 16-bit lanes of each operand to 32 bits and
/// multiplies them lane-wise (wasm `i32x4.extmul_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the low half of each input, then multiply; no overflow is
        // possible since i16 * i16 fits in i32.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3324
/// Sign-extends the high four 16-bit lanes of each operand to 32 bits and
/// multiplies them lane-wise (wasm `i32x4.extmul_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the high half of each input, then multiply; no overflow is
        // possible since i16 * i16 fits in i32.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3349
/// Zero-extends the low four 16-bit lanes of each operand to 32 bits and
/// multiplies them lane-wise (wasm `i32x4.extmul_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the low half of each input, then multiply; no overflow is
        // possible since u16 * u16 fits in u32.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3374
3375#[stable(feature = "wasm_simd", since = "1.54.0")]
3376pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
3377
/// Zero-extends the high four 16-bit lanes of each operand to 32 bits and
/// multiplies them lane-wise (wasm `i32x4.extmul_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the high half of each input, then multiply; no overflow is
        // possible since u16 * u16 fits in u32.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3402
3403#[stable(feature = "wasm_simd", since = "1.54.0")]
3404pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
3405
/// Lane-wise absolute value of two signed 64-bit lanes (wasm `i64x2.abs`),
/// written as select(a < 0, 0 - a, a). `i64::MIN` wraps to itself because
/// `simd_sub` is two's-complement wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_abs(a: v128) -> v128 {
    let a = a.as_i64x2();
    let zero = simd::i64x2::ZERO;
    unsafe {
        simd_select::<simd::m64x2, simd::i64x2>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3419
/// Lane-wise wrapping negation of two 64-bit lanes (wasm `i64x2.neg`),
/// implemented as a multiply by -1 so LLVM selects the `neg` instruction.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
}
3429
/// Returns `true` if both 64-bit lanes are nonzero (wasm `i64x2.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_all_true(a: v128) -> bool {
    unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
}
3439
3440#[stable(feature = "wasm_simd", since = "1.54.0")]
3441pub use i64x2_all_true as u64x2_all_true;
3442
/// Gathers the sign bit of each 64-bit lane into the low 2 bits of the
/// result (wasm `i64x2.bitmask`); the upper bits of the `u8` are zero.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
}
3453
3454#[stable(feature = "wasm_simd", since = "1.54.0")]
3455pub use i64x2_bitmask as u64x2_bitmask;
3456
/// Sign-extends the low two 32-bit lanes into two 64-bit lanes (wasm
/// `i64x2.extend_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..2 (the low half), then widen each i32 to i64.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
            .v128()
    }
}
3470
/// Sign-extends the high two 32-bit lanes into two 64-bit lanes (wasm
/// `i64x2.extend_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 2..4 (the high half), then widen each i32 to i64.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
            .v128()
    }
}
3484
/// Zero-extends the low two 32-bit lanes into two 64-bit lanes (wasm
/// `i64x2.extend_low_i32x4_u`). The `u32x2 -> i64x2` cast zero-extends
/// because the source lanes are unsigned.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
    unsafe {
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
            .v128()
    }
}
3498
3499#[stable(feature = "wasm_simd", since = "1.54.0")]
3500pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
3501
/// Zero-extends the high two 32-bit lanes into two 64-bit lanes (wasm
/// `i64x2.extend_high_i32x4_u`). The `u32x2 -> i64x2` cast zero-extends
/// because the source lanes are unsigned.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
    unsafe {
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
            .v128()
    }
}
3515
3516#[stable(feature = "wasm_simd", since = "1.54.0")]
3517pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
3518
/// Shifts each 64-bit lane left by `amt` bits (wasm `i64x2.shl`).
/// Only `amt % 64` is used (the `& 0x3f` mask), matching the wasm spec.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
}
3533
3534#[stable(feature = "wasm_simd", since = "1.54.0")]
3535pub use i64x2_shl as u64x2_shl;
3536
/// Arithmetically (sign-extending) shifts each 64-bit lane right by `amt`
/// bits (wasm `i64x2.shr_s`). Only `amt % 64` is used.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
}
3552
/// Logically (zero-filling) shifts each 64-bit lane right by `amt` bits
/// (wasm `i64x2.shr_u`). Only `amt % 64` is used.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat((amt & 0x3f) as u64)).v128() }
}
3568
/// Lane-wise wrapping addition of two 64-bit integer lanes (wasm
/// `i64x2.add`; the same operation serves signed and unsigned lanes).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
}
3578
3579#[stable(feature = "wasm_simd", since = "1.54.0")]
3580pub use i64x2_add as u64x2_add;
3581
/// Lane-wise wrapping subtraction of two 64-bit integer lanes (wasm
/// `i64x2.sub`; the same operation serves signed and unsigned lanes).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
}
3591
3592#[stable(feature = "wasm_simd", since = "1.54.0")]
3593pub use i64x2_sub as u64x2_sub;
3594
/// Lane-wise wrapping multiplication of two 64-bit integer lanes (wasm
/// `i64x2.mul`; the low 64 bits of each product are kept).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
}
3604
3605#[stable(feature = "wasm_simd", since = "1.54.0")]
3606pub use i64x2_mul as u64x2_mul;
3607
/// Sign-extends the low two 32-bit lanes of each operand to 64 bits and
/// multiplies them lane-wise (wasm `i64x2.extmul_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the low half of each input, then multiply; no overflow is
        // possible since i32 * i32 fits in i64.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3632
/// Sign-extends the high two 32-bit lanes of each operand to 64 bits and
/// multiplies them lane-wise (wasm `i64x2.extmul_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the high half of each input, then multiply; no overflow is
        // possible since i32 * i32 fits in i64.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3657
/// Zero-extends the low two 32-bit lanes of each operand to 64 bits and
/// multiplies them lane-wise (wasm `i64x2.extmul_low_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the low half of each input, then multiply; no overflow is
        // possible since u32 * u32 fits in u64.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3682
3683#[stable(feature = "wasm_simd", since = "1.54.0")]
3684pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
3685
/// Zero-extends the high two 32-bit lanes of each operand to 64 bits and
/// multiplies them lane-wise (wasm `i64x2.extmul_high_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen the high half of each input, then multiply; no overflow is
        // possible since u32 * u32 fits in u64.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3710
3711#[stable(feature = "wasm_simd", since = "1.54.0")]
3712pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
3713
/// Lane-wise rounding up (toward positive infinity) of four 32-bit floats
/// (wasm `f32x4.ceil`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f32x4()).v128() }
}
3723
/// Lane-wise rounding down (toward negative infinity) of four 32-bit floats
/// (wasm `f32x4.floor`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f32x4()).v128() }
}
3733
/// Lane-wise rounding toward zero of four 32-bit floats (wasm `f32x4.trunc`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f32x4()).v128() }
}
3744
/// Lane-wise rounding to the nearest integer of four 32-bit floats (wasm
/// `f32x4.nearest`), via the LLVM intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_nearest(a: v128) -> v128 {
    unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
}
3755
/// Lane-wise absolute value of four 32-bit floats (wasm `f32x4.abs`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f32x4()).v128() }
}
3766
/// Lane-wise negation (sign-bit flip) of four 32-bit floats (wasm `f32x4.neg`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f32x4()).v128() }
}
3777
/// Lane-wise square root of four 32-bit floats (wasm `f32x4.sqrt`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f32x4()).v128() }
}
3788
/// Lane-wise addition of four 32-bit floats (wasm `f32x4.add`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
}
3799
/// Lane-wise subtraction of four 32-bit floats (wasm `f32x4.sub`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
}
3810
/// Lane-wise multiplication of four 32-bit floats (wasm `f32x4.mul`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
}
3821
/// Lane-wise division of four 32-bit floats (wasm `f32x4.div`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
}
3832
/// Lane-wise minimum of four 32-bit floats with wasm `f32x4.min` semantics
/// (via the LLVM intrinsic); unlike [`f32x4_pmin`] this is NaN-propagating.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
}
3843
/// Lane-wise maximum of four 32-bit floats with wasm `f32x4.max` semantics
/// (via the LLVM intrinsic); unlike [`f32x4_pmax`] this is NaN-propagating.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
}
3854
/// Lane-wise pseudo-minimum of four 32-bit floats (wasm `f32x4.pmin`):
/// computes `b < a ? b : a`, so NaN and signed-zero handling follows the
/// comparison rather than IEEE minNum.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(b.as_f32x4(), a.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3871
/// Lane-wise pseudo-maximum of four 32-bit floats (wasm `f32x4.pmax`):
/// computes `a < b ? b : a`, so NaN and signed-zero handling follows the
/// comparison rather than IEEE maxNum.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(a.as_f32x4(), b.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3888
/// Lane-wise rounding up (toward positive infinity) of two 64-bit floats
/// (wasm `f64x2.ceil`).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f64x2()).v128() }
}
3898
/// Lane-wise rounding of two 64-bit floats to the largest integral value not
/// greater than the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f64x2()).v128() }
}
3908
/// Lane-wise rounding of two 64-bit floats toward zero (truncation).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f64x2()).v128() }
}
3919
/// Lane-wise rounding of two 64-bit floats to the nearest integral value;
/// delegates to the LLVM `f64x2.nearest` intrinsic for the wasm instruction's
/// rounding behavior.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_nearest(a: v128) -> v128 {
    unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
}
3930
/// Lane-wise absolute value of two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f64x2()).v128() }
}
3941
/// Lane-wise negation of two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f64x2()).v128() }
}
3952
/// Lane-wise square root of two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f64x2()).v128() }
}
3963
/// Lane-wise addition of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
}
3974
/// Lane-wise subtraction of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
}
3985
/// Lane-wise multiplication of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
}
3996
/// Lane-wise division of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
}
4007
/// Lane-wise minimum of two 128-bit vectors interpreted as two 64-bit floats.
///
/// Delegates to the dedicated LLVM `f64x2.min` intrinsic rather than a generic
/// simd op, so NaN/zero handling follows the wasm instruction's definition.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
}
4018
/// Lane-wise maximum of two 128-bit vectors interpreted as two 64-bit floats.
///
/// Delegates to the dedicated LLVM `f64x2.max` intrinsic rather than a generic
/// simd op, so NaN/zero handling follows the wasm instruction's definition.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
}
4029
/// Lane-wise pseudo-minimum of two 128-bit vectors of two 64-bit floats:
/// each result lane is `if b < a { b } else { a }` (so NaN/±0 behavior differs
/// from `f64x2_min`).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        // select(b < a, b, a): written as a select so it lowers to f64x2.pmin.
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(b.as_f64x2(), a.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4046
/// Lane-wise pseudo-maximum of two 128-bit vectors of two 64-bit floats:
/// each result lane is `if a < b { b } else { a }` (so NaN/±0 behavior differs
/// from `f64x2_max`).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        // select(a < b, b, a): written as a select so it lowers to f64x2.pmax.
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(a.as_f64x2(), b.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4063
/// Saturating lane-wise conversion of four 32-bit floats to four signed
/// 32-bit integers (`simd_as` performs Rust's saturating float-to-int cast).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::i32x4>(a.as_f32x4()).v128() }
}
4077
/// Saturating lane-wise conversion of four 32-bit floats to four unsigned
/// 32-bit integers (`simd_as` performs Rust's saturating float-to-int cast).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::u32x4>(a.as_f32x4()).v128() }
}
4091
/// Converts each signed 32-bit integer lane to a 32-bit floating point number.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_i32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
}
4102
/// Converts each unsigned 32-bit integer lane to a 32-bit floating point
/// number.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_u32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
}
4113
/// Saturating conversion of the two 64-bit float lanes to two signed 32-bit
/// integer lanes; the two high lanes of the result are zero.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    // Saturating-cast the two lanes, then widen to four lanes by shuffling in
    // two lanes from a zero vector.
    let ret: simd::i32x4 = unsafe {
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::i32x2>(a.as_f64x2()),
            simd::i32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4137
/// Saturating conversion of the two 64-bit float lanes to two unsigned 32-bit
/// integer lanes; the two high lanes of the result are zero.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    // Saturating-cast the two lanes, then widen to four lanes by shuffling in
    // two lanes from a zero vector.
    let ret: simd::u32x4 = unsafe {
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::u32x2>(a.as_f64x2()),
            simd::u32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4161
/// Converts the low two signed 32-bit integer lanes to two 64-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Shuffle extracts lanes 0 and 1, then the cast widens i32 -> f64.
        simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
            .v128()
    }
}
4174
/// Converts the low two unsigned 32-bit integer lanes to two 64-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
    unsafe {
        // Shuffle extracts lanes 0 and 1, then the cast widens u32 -> f64.
        simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
            .v128()
    }
}
4187
/// Demotes the two 64-bit float lanes to 32-bit floats in the low two lanes of
/// the result; the upper two lanes are zero.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.demote_f64x2_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
    unsafe {
        // Pad to f64x4 with a zero vector, then narrow every lane f64 -> f32.
        simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle!(
            a.as_f64x2(),
            simd::f64x2::ZERO,
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
4208
4209#[inline]
4212#[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))]
4213#[target_feature(enable = "simd128")]
4214#[doc(alias("f32x4.promote_low_f32x4"))]
4215#[stable(feature = "wasm_simd", since = "1.54.0")]
4216pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
4217 unsafe {
4218 simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
4219 .v128()
4220 }
4221}
4222
4223#[cfg(test)]
4224mod tests {
4225 use super::*;
4226 use core::ops::{Add, Div, Mul, Neg, Sub};
4227
4228 use std::fmt::Debug;
4229 use std::mem::transmute;
4230 use std::num::Wrapping;
4231 use std::prelude::v1::*;
4232
    // Compile-time smoke checks that every lane constructor (`i8x16`..`f64x2`)
    // is usable in `const` context; the leading-underscore names suggest the
    // values themselves are never read at runtime.
    const _C1: v128 = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C2: v128 = u8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C3: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C4: v128 = u16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C5: v128 = i32x4(0, 1, 2, 3);
    const _C6: v128 = u32x4(0, 1, 2, 3);
    const _C7: v128 = i64x2(0, 1);
    const _C8: v128 = u64x2(0, 1);
    const _C9: v128 = f32x4(0.0, 1.0, 2.0, 3.0);
    const _C10: v128 = f64x2(0.0, 1.0);
4243
4244 fn compare_bytes(a: v128, b: v128) {
4245 let a: [u8; 16] = unsafe { transmute(a) };
4246 let b: [u8; 16] = unsafe { transmute(b) };
4247 assert_eq!(a, b);
4248 }
4249
4250 #[test]
4251 fn test_load() {
4252 unsafe {
4253 let arr: [i32; 4] = [0, 1, 2, 3];
4254 let vec = v128_load(arr.as_ptr() as *const v128);
4255 compare_bytes(vec, i32x4(0, 1, 2, 3));
4256 }
4257 }
4258
    // Checks the widening loads: the signed variants sign-extend negative
    // lanes, the unsigned variants reinterpret the same bytes and zero-extend.
    #[test]
    fn test_load_extend() {
        unsafe {
            let arr: [i8; 8] = [-3, -2, -1, 0, 1, 2, 3, 4];
            let vec = i16x8_load_extend_i8x8(arr.as_ptr());
            compare_bytes(vec, i16x8(-3, -2, -1, 0, 1, 2, 3, 4));
            let vec = i16x8_load_extend_u8x8(arr.as_ptr() as *const u8);
            compare_bytes(vec, i16x8(253, 254, 255, 0, 1, 2, 3, 4));

            let arr: [i16; 4] = [-1, 0, 1, 2];
            let vec = i32x4_load_extend_i16x4(arr.as_ptr());
            compare_bytes(vec, i32x4(-1, 0, 1, 2));
            let vec = i32x4_load_extend_u16x4(arr.as_ptr() as *const u16);
            compare_bytes(vec, i32x4(65535, 0, 1, 2));

            let arr: [i32; 2] = [-1, 1];
            let vec = i64x2_load_extend_i32x2(arr.as_ptr());
            compare_bytes(vec, i64x2(-1, 1));
            let vec = i64x2_load_extend_u32x2(arr.as_ptr() as *const u32);
            compare_bytes(vec, i64x2(u32::max_value().into(), 1));
        }
    }
4281
4282 #[test]
4283 fn test_load_splat() {
4284 unsafe {
4285 compare_bytes(v128_load8_splat(&8), i8x16_splat(8));
4286 compare_bytes(v128_load16_splat(&9), i16x8_splat(9));
4287 compare_bytes(v128_load32_splat(&10), i32x4_splat(10));
4288 compare_bytes(v128_load64_splat(&11), i64x2_splat(11));
4289 }
4290 }
4291
4292 #[test]
4293 fn test_load_zero() {
4294 unsafe {
4295 compare_bytes(v128_load32_zero(&10), i32x4(10, 0, 0, 0));
4296 compare_bytes(v128_load64_zero(&11), i64x2(11, 0));
4297 }
4298 }
4299
4300 #[test]
4301 fn test_store() {
4302 unsafe {
4303 let mut spot = i8x16_splat(0);
4304 v128_store(&mut spot, i8x16_splat(1));
4305 compare_bytes(spot, i8x16_splat(1));
4306 }
4307 }
4308
    // Loading a single lane from memory must match replacing the same lane.
    #[test]
    fn test_load_lane() {
        unsafe {
            let zero = i8x16_splat(0);
            compare_bytes(
                v128_load8_lane::<2>(zero, &1),
                i8x16_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load16_lane::<2>(zero, &1),
                i16x8_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load32_lane::<2>(zero, &1),
                i32x4_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load64_lane::<1>(zero, &1),
                i64x2_replace_lane::<1>(zero, 1),
            );
        }
    }
4334
    // Storing a single lane writes exactly that lane's value to memory.
    #[test]
    fn test_store_lane() {
        unsafe {
            let mut spot = 0;
            let zero = i8x16_splat(0);
            v128_store8_lane::<5>(i8x16_replace_lane::<5>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store16_lane::<5>(i16x8_replace_lane::<5>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store32_lane::<3>(i32x4_replace_lane::<3>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store64_lane::<0>(i64x2_replace_lane::<0>(zero, 7), &mut spot);
            assert_eq!(spot, 7);
        }
    }
4356
    // Checks the lane constructors: usable in const context and laying out
    // lanes in the expected (little-endian lane order) byte positions.
    #[test]
    fn test_i8x16() {
        const A: v128 = super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        compare_bytes(A, A);

        const _: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        const _: v128 = i32x4(0, 1, 2, 3);
        const _: v128 = i64x2(0, 1);
        const _: v128 = f32x4(0., 1., 2., 3.);
        const _: v128 = f64x2(0., 1.);

        let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
        assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
        let bytes: [i8; 16] = unsafe {
            mem::transmute(i8x16(
                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
            ))
        };
        assert_eq!(
            bytes,
            [
                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16
            ]
        );
    }
4382
    // Interleaving shuffles at every lane width: even lanes come from the
    // first operand, odd result lanes from the second.
    #[test]
    fn test_shuffle() {
        let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let vec_b = i8x16(
            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
        );

        let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>(
            vec_a, vec_b,
        );
        let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
        compare_bytes(vec_r, vec_e);

        let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15);
        let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b);
        let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14);
        compare_bytes(vec_r, vec_e);

        let vec_a = i32x4(0, 1, 2, 3);
        let vec_b = i32x4(4, 5, 6, 7);
        let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b);
        let vec_e = i32x4(0, 4, 2, 6);
        compare_bytes(vec_r, vec_e);

        let vec_a = i64x2(0, 1);
        let vec_b = i64x2(2, 3);
        let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b);
        let vec_e = i64x2(0, 2);
        compare_bytes(vec_r, vec_e);
    }
4414
    // Generates a test that round-trips every lane index through the given
    // extract/replace intrinsic pair.
    macro_rules! test_extract {
        (
            name: $test_id:ident,
            extract: $extract:ident,
            replace: $replace:ident,
            elem: $elem:ty,
            count: $count:expr,
            indices: [$($idx:expr),*],
        ) => {
            #[test]
            fn $test_id() {
                unsafe {
                    // All lanes equal: extraction at any index yields that value.
                    let arr: [$elem; $count] = [123 as $elem; $count];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), 123 as $elem);
                    )*

                    // Distinct lanes: each index extracts its own value, and
                    // replacing a lane is visible to a following extract.
                    let arr: [$elem; $count] = [$($idx as $elem),*];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), $idx as $elem);

                        let tmp = $replace::<$idx>(vec, 124 as $elem);
                        assert_eq!($extract::<$idx>(tmp), 124 as $elem);
                    )*
                }
            }
        }
    }
4448
    // Instantiate the extract/replace round-trip test for every lane type.
    test_extract! {
        name: test_i8x16_extract_replace,
        extract: i8x16_extract_lane,
        replace: i8x16_replace_lane,
        elem: i8,
        count: 16,
        indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    }
    test_extract! {
        name: test_i16x8_extract_replace,
        extract: i16x8_extract_lane,
        replace: i16x8_replace_lane,
        elem: i16,
        count: 8,
        indices: [0, 1, 2, 3, 4, 5, 6, 7],
    }
    test_extract! {
        name: test_i32x4_extract_replace,
        extract: i32x4_extract_lane,
        replace: i32x4_replace_lane,
        elem: i32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_i64x2_extract_replace,
        extract: i64x2_extract_lane,
        replace: i64x2_replace_lane,
        elem: i64,
        count: 2,
        indices: [0, 1],
    }
    test_extract! {
        name: test_f32x4_extract_replace,
        extract: f32x4_extract_lane,
        replace: f32x4_replace_lane,
        elem: f32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_f64x2_extract_replace,
        extract: f64x2_extract_lane,
        replace: f64x2_replace_lane,
        elem: f64,
        count: 2,
        indices: [0, 1],
    }
4497
    // Swizzle selects bytes by index; out-of-range indices (>= 16, e.g. the
    // leading 32, 31, ... here) produce zero bytes.
    #[test]
    #[rustfmt::skip]
    fn test_swizzle() {
        compare_bytes(
            i8x16_swizzle(
                i32x4(1, 2, 3, 4),
                i8x16(
                    32, 31, 30, 29,
                    0, 1, 2, 3,
                    12, 13, 14, 15,
                    0, 4, 8, 12),
            ),
            i32x4(0, 1, 4, 0x04030201),
        );
    }
4513
    // Generates a test that a splat constructor produces the given 16-byte
    // little-endian pattern.
    macro_rules! test_splat {
        ($test_id:ident: $val:expr => $($vals:expr),*) => {
            #[test]
            fn $test_id() {
                let a = super::$test_id($val);
                let b = u8x16($($vals as u8),*);
                compare_bytes(a, b);
            }
        }
    }
4524
    mod splats {
        use super::*;
        // Expected byte patterns are the little-endian encodings of the
        // splatted value repeated across the vector (42.0f32 = 0x42280000,
        // 42.0f64 = 0x4045000000000000).
        test_splat!(i8x16_splat: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
        test_splat!(i16x8_splat: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
        test_splat!(i32x4_splat: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
        test_splat!(i64x2_splat: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
        test_splat!(f32x4_splat: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
        test_splat!(f64x2_splat: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
    }
4534
    // Bitmask extracts the top bit of every lane into an integer; MAX has a
    // clear sign bit (mask 0), MIN has it set (all mask bits set).
    #[test]
    fn test_bitmasks() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        assert_eq!(i8x16_bitmask(zero), 0);
        assert_eq!(i8x16_bitmask(ones), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i16x8_bitmask(zero), 0);
        assert_eq!(i16x8_bitmask(ones), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i32x4_bitmask(zero), 0);
        assert_eq!(i32x4_bitmask(ones), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i64x2_bitmask(zero), 0);
        assert_eq!(i64x2_bitmask(ones), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10);
    }
4564
    // Narrowing saturates each wide lane into the narrower type: the signed
    // variants clamp to [i8::MIN, i8::MAX] / [i16::MIN, i16::MAX], the
    // unsigned variants clamp negatives to 0 and overflow to the unsigned max.
    #[test]
    fn test_narrow() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(u8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(i8x16_narrow_i16x8(ones, ones), ones);
        compare_bytes(u8x16_narrow_i16x8(ones, ones), zero);

        compare_bytes(
            i8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0),
        );

        compare_bytes(
            u8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0),
        );

        compare_bytes(i16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(u16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(i16x8_narrow_i32x4(ones, ones), ones);
        compare_bytes(u16x8_narrow_i32x4(ones, ones), zero);

        compare_bytes(
            i16x8_narrow_i32x4(
                i32x4(0, -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1),
        );

        compare_bytes(
            u16x8_narrow_i32x4(
                i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0),
        );
    }
4648
    // Widening: the signed variants sign-extend (all-ones stays all-ones),
    // the unsigned variants zero-extend (all-ones becomes the narrow max).
    #[test]
    fn test_extend() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i16x8_extend_low_i8x16(zero), zero);
        compare_bytes(i16x8_extend_high_i8x16(zero), zero);
        compare_bytes(i16x8_extend_low_u8x16(zero), zero);
        compare_bytes(i16x8_extend_high_u8x16(zero), zero);
        compare_bytes(i16x8_extend_low_i8x16(ones), ones);
        compare_bytes(i16x8_extend_high_i8x16(ones), ones);
        let halves = u16x8_splat(u8::MAX.into());
        compare_bytes(i16x8_extend_low_u8x16(ones), halves);
        compare_bytes(i16x8_extend_high_u8x16(ones), halves);

        compare_bytes(i32x4_extend_low_i16x8(zero), zero);
        compare_bytes(i32x4_extend_high_i16x8(zero), zero);
        compare_bytes(i32x4_extend_low_u16x8(zero), zero);
        compare_bytes(i32x4_extend_high_u16x8(zero), zero);
        compare_bytes(i32x4_extend_low_i16x8(ones), ones);
        compare_bytes(i32x4_extend_high_i16x8(ones), ones);
        let halves = u32x4_splat(u16::MAX.into());
        compare_bytes(i32x4_extend_low_u16x8(ones), halves);
        compare_bytes(i32x4_extend_high_u16x8(ones), halves);

        compare_bytes(i64x2_extend_low_i32x4(zero), zero);
        compare_bytes(i64x2_extend_high_i32x4(zero), zero);
        compare_bytes(i64x2_extend_low_u32x4(zero), zero);
        compare_bytes(i64x2_extend_high_u32x4(zero), zero);
        compare_bytes(i64x2_extend_low_i32x4(ones), ones);
        compare_bytes(i64x2_extend_high_i32x4(ones), ones);
        let halves = i64x2_splat(u32::MAX.into());
        compare_bytes(u64x2_extend_low_u32x4(ones), halves);
        compare_bytes(u64x2_extend_high_u32x4(ones), halves);
    }
4684
4685 #[test]
4686 fn test_dot() {
4687 let zero = i8x16_splat(0);
4688 let ones = i8x16_splat(!0);
4689 let two = i32x4_splat(2);
4690 compare_bytes(i32x4_dot_i16x8(zero, zero), zero);
4691 compare_bytes(i32x4_dot_i16x8(ones, ones), two);
4692 }
4693
    // Generates a test comparing a binary v128 intrinsic `$f` against the
    // scalar method `$op` applied lane by lane.
    macro_rules! test_binop {
        (
            $($name:ident => {
                $([$($vec1:tt)*] ($op:ident | $f:ident) [$($vec2:tt)*],)*
            })*
        ) => ($(
            #[test]
            fn $name() {
                unsafe {
                    $(
                        let v1 = [$($vec1)*];
                        let v2 = [$($vec2)*];
                        let v1_v128: v128 = mem::transmute(v1);
                        let v2_v128: v128 = mem::transmute(v2);
                        let v3_v128 = super::$f(v1_v128, v2_v128);
                        // v3 is initialized from $vec1 only to fix its array
                        // type; the value is overwritten by the transmute.
                        // The read below presumably silences an
                        // unused-assignment lint — TODO confirm.
                        let mut v3 = [$($vec1)*];
                        let _ignore = v3;
                        v3 = mem::transmute(v3_v128);

                        for (i, actual) in v3.iter().enumerate() {
                            let expected = v1[i].$op(v2[i]);
                            assert_eq!(*actual, expected);
                        }
                    )*
                }
            }
        )*)
    }
4722
    // Generates a test comparing a unary v128 intrinsic `$f` against the
    // scalar method `$op` applied lane by lane.
    macro_rules! test_unop {
        (
            $($name:ident => {
                $(($op:ident | $f:ident) [$($vec1:tt)*],)*
            })*
        ) => ($(
            #[test]
            fn $name() {
                unsafe {
                    $(
                        let v1 = [$($vec1)*];
                        let v1_v128: v128 = mem::transmute(v1);
                        let v2_v128 = super::$f(v1_v128);
                        // v2 is initialized from $vec1 only to fix its array
                        // type; the value is overwritten by the transmute.
                        let mut v2 = [$($vec1)*];
                        let _ignore = v2;
                        v2 = mem::transmute(v2_v128);

                        for (i, actual) in v2.iter().enumerate() {
                            let expected = v1[i].$op();
                            assert_eq!(*actual, expected);
                        }
                    )*
                }
            }
        )*)
    }
4749
    // Scalar reference implementation of wasm's rounding average ("avgr"):
    // (a + b + 1) / 2, computed in u64 so the sum cannot overflow.
    trait Avgr: Sized {
        fn avgr(self, other: Self) -> Self;
    }

    macro_rules! impl_avgr {
        ($($i:ident)*) => ($(impl Avgr for $i {
            fn avgr(self, other: Self) -> Self {
                ((self as u64 + other as u64 + 1) / 2) as $i
            }
        })*)
    }

    impl_avgr!(u8 u16);
4763
4764 test_binop! {
4765 test_i8x16_add => {
4766 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4767 (wrapping_add | i8x16_add)
4768 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4769
4770 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4771 (wrapping_add | i8x16_add)
4772 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4773
4774 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4775 (wrapping_add | i8x16_add)
4776 [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
4777 }
4778
4779 test_i8x16_add_sat_s => {
4780 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4781 (saturating_add | i8x16_add_sat)
4782 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4783
4784 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4785 (saturating_add | i8x16_add_sat)
4786 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4787
4788 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4789 (saturating_add | i8x16_add_sat)
4790 [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
4791 }
4792
4793 test_i8x16_add_sat_u => {
4794 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4795 (saturating_add | u8x16_add_sat)
4796 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4797
4798 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4799 (saturating_add | u8x16_add_sat)
4800 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4801
4802 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4803 (saturating_add | u8x16_add_sat)
4804 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4805 }
4806
4807 test_i8x16_sub => {
4808 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4809 (wrapping_sub | i8x16_sub)
4810 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4811
4812 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4813 (wrapping_sub | i8x16_sub)
4814 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4815
4816 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4817 (wrapping_sub | i8x16_sub)
4818 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4819 }
4820
4821 test_i8x16_sub_sat_s => {
4822 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4823 (saturating_sub | i8x16_sub_sat)
4824 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4825
4826 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4827 (saturating_sub | i8x16_sub_sat)
4828 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4829
4830 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4831 (saturating_sub | i8x16_sub_sat)
4832 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4833 }
4834
4835 test_i8x16_sub_sat_u => {
4836 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4837 (saturating_sub | u8x16_sub_sat)
4838 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4839
4840 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4841 (saturating_sub | u8x16_sub_sat)
4842 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4843
4844 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4845 (saturating_sub | u8x16_sub_sat)
4846 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4847 }
4848
4849 test_i8x16_min_s => {
4850 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4851 (min | i8x16_min)
4852 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4853
4854 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4855 (min | i8x16_min)
4856 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4857
4858 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4859 (min | i8x16_min)
4860 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4861 }
4862
4863 test_i8x16_min_u => {
4864 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4865 (min | u8x16_min)
4866 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4867
4868 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4869 (min | u8x16_min)
4870 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4871
4872 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4873 (min | u8x16_min)
4874 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4875 }
4876
4877 test_i8x16_max_s => {
4878 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4879 (max | i8x16_max)
4880 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4881
4882 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4883 (max | i8x16_max)
4884 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4885
4886 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4887 (max | i8x16_max)
4888 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4889 }
4890
4891 test_i8x16_max_u => {
4892 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4893 (max | u8x16_max)
4894 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4895
4896 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4897 (max | u8x16_max)
4898 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4899
4900 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4901 (max | u8x16_max)
4902 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4903 }
4904
4905 test_i8x16_avgr_u => {
4906 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4907 (avgr | u8x16_avgr)
4908 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4909
4910 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4911 (avgr | u8x16_avgr)
4912 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4913
4914 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4915 (avgr | u8x16_avgr)
4916 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4917 }
4918
4919 test_i16x8_add => {
4920 [0i16, 0, 0, 0, 0, 0, 0, 0]
4921 (wrapping_add | i16x8_add)
4922 [1i16, 1, 1, 1, 1, 1, 1, 1],
4923
4924 [1i16, 2, 3, 4, 5, 6, 7, 8]
4925 (wrapping_add | i16x8_add)
4926 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4927 }
4928
4929 test_i16x8_add_sat_s => {
4930 [0i16, 0, 0, 0, 0, 0, 0, 0]
4931 (saturating_add | i16x8_add_sat)
4932 [1i16, 1, 1, 1, 1, 1, 1, 1],
4933
4934 [1i16, 2, 3, 4, 5, 6, 7, 8]
4935 (saturating_add | i16x8_add_sat)
4936 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4937 }
4938
4939 test_i16x8_add_sat_u => {
4940 [0u16, 0, 0, 0, 0, 0, 0, 0]
4941 (saturating_add | u16x8_add_sat)
4942 [1u16, 1, 1, 1, 1, 1, 1, 1],
4943
4944 [1u16, 2, 3, 4, 5, 6, 7, 8]
4945 (saturating_add | u16x8_add_sat)
4946 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
4947 }
4948
4949 test_i16x8_sub => {
4950 [0i16, 0, 0, 0, 0, 0, 0, 0]
4951 (wrapping_sub | i16x8_sub)
4952 [1i16, 1, 1, 1, 1, 1, 1, 1],
4953
4954 [1i16, 2, 3, 4, 5, 6, 7, 8]
4955 (wrapping_sub | i16x8_sub)
4956 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4957 }
4958
4959 test_i16x8_sub_sat_s => {
4960 [0i16, 0, 0, 0, 0, 0, 0, 0]
4961 (saturating_sub | i16x8_sub_sat)
4962 [1i16, 1, 1, 1, 1, 1, 1, 1],
4963
4964 [1i16, 2, 3, 4, 5, 6, 7, 8]
4965 (saturating_sub | i16x8_sub_sat)
4966 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4967 }
4968
4969 test_i16x8_sub_sat_u => {
4970 [0u16, 0, 0, 0, 0, 0, 0, 0]
4971 (saturating_sub | u16x8_sub_sat)
4972 [1u16, 1, 1, 1, 1, 1, 1, 1],
4973
4974 [1u16, 2, 3, 4, 5, 6, 7, 8]
4975 (saturating_sub | u16x8_sub_sat)
4976 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
4977 }
4978
4979 test_i16x8_mul => {
4980 [0i16, 0, 0, 0, 0, 0, 0, 0]
4981 (wrapping_mul | i16x8_mul)
4982 [1i16, 1, 1, 1, 1, 1, 1, 1],
4983
4984 [1i16, 2, 3, 4, 5, 6, 7, 8]
4985 (wrapping_mul | i16x8_mul)
4986 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4987 }
4988
4989 test_i16x8_min_s => {
4990 [0i16, 0, 0, 0, 0, 0, 0, 0]
4991 (min | i16x8_min)
4992 [1i16, 1, 1, 1, 1, 1, 1, 1],
4993
4994 [1i16, 2, 3, 4, 5, 6, 7, 8]
4995 (min | i16x8_min)
4996 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4997 }
4998
4999 test_i16x8_min_u => {
5000 [0u16, 0, 0, 0, 0, 0, 0, 0]
5001 (min | u16x8_min)
5002 [1u16, 1, 1, 1, 1, 1, 1, 1],
5003
5004 [1u16, 2, 3, 4, 5, 6, 7, 8]
5005 (min | u16x8_min)
5006 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5007 }
5008
5009 test_i16x8_max_s => {
5010 [0i16, 0, 0, 0, 0, 0, 0, 0]
5011 (max | i16x8_max)
5012 [1i16, 1, 1, 1, 1, 1, 1, 1],
5013
5014 [1i16, 2, 3, 4, 5, 6, 7, 8]
5015 (max | i16x8_max)
5016 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5017 }
5018
5019 test_i16x8_max_u => {
5020 [0u16, 0, 0, 0, 0, 0, 0, 0]
5021 (max | u16x8_max)
5022 [1u16, 1, 1, 1, 1, 1, 1, 1],
5023
5024 [1u16, 2, 3, 4, 5, 6, 7, 8]
5025 (max | u16x8_max)
5026 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5027 }
5028
5029 test_i16x8_avgr_u => {
5030 [0u16, 0, 0, 0, 0, 0, 0, 0]
5031 (avgr | u16x8_avgr)
5032 [1u16, 1, 1, 1, 1, 1, 1, 1],
5033
5034 [1u16, 2, 3, 4, 5, 6, 7, 8]
5035 (avgr | u16x8_avgr)
5036 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5037 }
5038
5039 test_i32x4_add => {
5040 [0i32, 0, 0, 0] (wrapping_add | i32x4_add) [1, 2, 3, 4],
5041 [1i32, 1283, i32::MAX, i32::MIN]
5042 (wrapping_add | i32x4_add)
5043 [i32::MAX; 4],
5044 }
5045
5046 test_i32x4_sub => {
5047 [0i32, 0, 0, 0] (wrapping_sub | i32x4_sub) [1, 2, 3, 4],
5048 [1i32, 1283, i32::MAX, i32::MIN]
5049 (wrapping_sub | i32x4_sub)
5050 [i32::MAX; 4],
5051 }
5052
5053 test_i32x4_mul => {
5054 [0i32, 0, 0, 0] (wrapping_mul | i32x4_mul) [1, 2, 3, 4],
5055 [1i32, 1283, i32::MAX, i32::MIN]
5056 (wrapping_mul | i32x4_mul)
5057 [i32::MAX; 4],
5058 }
5059
5060 test_i32x4_min_s => {
5061 [0i32, 0, 0, 0] (min | i32x4_min) [1, 2, 3, 4],
5062 [1i32, 1283, i32::MAX, i32::MIN]
5063 (min | i32x4_min)
5064 [i32::MAX; 4],
5065 }
5066
5067 test_i32x4_min_u => {
5068 [0u32, 0, 0, 0] (min | u32x4_min) [1, 2, 3, 4],
5069 [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
5070 (min | u32x4_min)
5071 [i32::MAX as u32; 4],
5072 }
5073
5074 test_i32x4_max_s => {
5075 [0i32, 0, 0, 0] (max | i32x4_max) [1, 2, 3, 4],
5076 [1i32, 1283, i32::MAX, i32::MIN]
5077 (max | i32x4_max)
5078 [i32::MAX; 4],
5079 }
5080
5081 test_i32x4_max_u => {
5082 [0u32, 0, 0, 0] (max | u32x4_max) [1, 2, 3, 4],
5083 [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
5084 (max | u32x4_max)
5085 [i32::MAX as u32; 4],
5086 }
5087
5088 test_i64x2_add => {
5089 [0i64, 0] (wrapping_add | i64x2_add) [1, 2],
5090 [i64::MIN, i64::MAX] (wrapping_add | i64x2_add) [i64::MAX, i64::MIN],
5091 [i64::MAX; 2] (wrapping_add | i64x2_add) [i64::MAX; 2],
5092 [-4i64, -4] (wrapping_add | i64x2_add) [800, 939],
5093 }
5094
5095 test_i64x2_sub => {
5096 [0i64, 0] (wrapping_sub | i64x2_sub) [1, 2],
5097 [i64::MIN, i64::MAX] (wrapping_sub | i64x2_sub) [i64::MAX, i64::MIN],
5098 [i64::MAX; 2] (wrapping_sub | i64x2_sub) [i64::MAX; 2],
5099 [-4i64, -4] (wrapping_sub | i64x2_sub) [800, 939],
5100 }
5101
5102 test_i64x2_mul => {
5103 [0i64, 0] (wrapping_mul | i64x2_mul) [1, 2],
5104 [i64::MIN, i64::MAX] (wrapping_mul | i64x2_mul) [i64::MAX, i64::MIN],
5105 [i64::MAX; 2] (wrapping_mul | i64x2_mul) [i64::MAX; 2],
5106 [-4i64, -4] (wrapping_mul | i64x2_mul) [800, 939],
5107 }
5108
5109 test_f32x4_add => {
5110 [-1.0f32, 2.0, 3.0, 4.0] (add | f32x4_add) [1., 2., 0., 0.],
5111 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5112 (add | f32x4_add)
5113 [1., 2., 0., 0.],
5114 }
5115
5116 test_f32x4_sub => {
5117 [-1.0f32, 2.0, 3.0, 4.0] (sub | f32x4_sub) [1., 2., 0., 0.],
5118 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5119 (sub | f32x4_sub)
5120 [1., 2., 0., 0.],
5121 }
5122
5123 test_f32x4_mul => {
5124 [-1.0f32, 2.0, 3.0, 4.0] (mul | f32x4_mul) [1., 2., 0., 0.],
5125 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5126 (mul | f32x4_mul)
5127 [1., 2., 1., 0.],
5128 }
5129
5130 test_f32x4_div => {
5131 [-1.0f32, 2.0, 3.0, 4.0] (div | f32x4_div) [1., 2., 0., 0.],
5132 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5133 (div | f32x4_div)
5134 [1., 2., 0., 0.],
5135 }
5136
5137 test_f32x4_min => {
5138 [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_min) [1., 2., 0., 0.],
5139 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5140 (min | f32x4_min)
5141 [1., 2., 0., 0.],
5142 }
5143
5144 test_f32x4_max => {
5145 [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_max) [1., 2., 0., 0.],
5146 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5147 (max | f32x4_max)
5148 [1., 2., 0., 0.],
5149 }
5150
5151 test_f32x4_pmin => {
5152 [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_pmin) [1., 2., 0., 0.],
5153 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5154 (min | f32x4_pmin)
5155 [1., 2., 0., 0.],
5156 }
5157
5158 test_f32x4_pmax => {
5159 [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_pmax) [1., 2., 0., 0.],
5160 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5161 (max | f32x4_pmax)
5162 [1., 2., 0., 0.],
5163 }
5164
5165 test_f64x2_add => {
5166 [-1.0f64, 2.0] (add | f64x2_add) [1., 2.],
5167 [f64::INFINITY, f64::NEG_INFINITY] (add | f64x2_add) [1., 2.],
5168 }
5169
5170 test_f64x2_sub => {
5171 [-1.0f64, 2.0] (sub | f64x2_sub) [1., 2.],
5172 [f64::INFINITY, f64::NEG_INFINITY] (sub | f64x2_sub) [1., 2.],
5173 }
5174
5175 test_f64x2_mul => {
5176 [-1.0f64, 2.0] (mul | f64x2_mul) [1., 2.],
5177 [f64::INFINITY, f64::NEG_INFINITY] (mul | f64x2_mul) [1., 2.],
5178 }
5179
5180 test_f64x2_div => {
5181 [-1.0f64, 2.0] (div | f64x2_div) [1., 2.],
5182 [f64::INFINITY, f64::NEG_INFINITY] (div | f64x2_div) [1., 2.],
5183 }
5184
5185 test_f64x2_min => {
5186 [-1.0f64, 2.0] (min | f64x2_min) [1., 2.],
5187 [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_min) [1., 2.],
5188 }
5189
5190 test_f64x2_max => {
5191 [-1.0f64, 2.0] (max | f64x2_max) [1., 2.],
5192 [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_max) [1., 2.],
5193 }
5194
5195 test_f64x2_pmin => {
5196 [-1.0f64, 2.0] (min | f64x2_pmin) [1., 2.],
5197 [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_pmin) [1., 2.],
5198 }
5199
5200 test_f64x2_pmax => {
5201 [-1.0f64, 2.0] (max | f64x2_pmax) [1., 2.],
5202 [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_pmax) [1., 2.],
5203 }
5204 }
5205
// Lane-wise checks for every unary SIMD op: each entry pairs a scalar
// reference method (left of `|`) with the intrinsic under test (right of
// `|`), applied to each input vector listed beneath it. Inputs cover the
// interesting edges visible here: MIN/MAX (wrapping `abs`/`neg` of MIN),
// negative zero, and infinities for the float ops.
test_unop! {
    test_i8x16_abs => {
        (wrapping_abs | i8x16_abs)
        [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

        (wrapping_abs | i8x16_abs)
        [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

        (wrapping_abs | i8x16_abs)
        [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
    }

    test_i8x16_neg => {
        (wrapping_neg | i8x16_neg)
        [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

        (wrapping_neg | i8x16_neg)
        [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

        (wrapping_neg | i8x16_neg)
        [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
    }

    test_i16x8_abs => {
        (wrapping_abs | i16x8_abs) [1i16, 1, 1, 1, 1, 1, 1, 1],
        (wrapping_abs | i16x8_abs) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
    }

    test_i16x8_neg => {
        (wrapping_neg | i16x8_neg) [1i16, 1, 1, 1, 1, 1, 1, 1],
        (wrapping_neg | i16x8_neg) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
    }

    test_i32x4_abs => {
        (wrapping_abs | i32x4_abs) [1i32, 2, 3, 4],
        (wrapping_abs | i32x4_abs) [i32::MIN, i32::MAX, 0, 4],
    }

    test_i32x4_neg => {
        (wrapping_neg | i32x4_neg) [1i32, 2, 3, 4],
        (wrapping_neg | i32x4_neg) [i32::MIN, i32::MAX, 0, 4],
    }

    test_i64x2_abs => {
        (wrapping_abs | i64x2_abs) [1i64, 2],
        (wrapping_abs | i64x2_abs) [i64::MIN, i64::MAX],
    }

    test_i64x2_neg => {
        (wrapping_neg | i64x2_neg) [1i64, 2],
        (wrapping_neg | i64x2_neg) [i64::MIN, i64::MAX],
    }

    test_f32x4_ceil => {
        (ceil | f32x4_ceil) [1.0f32, 2., 2.5, 3.3],
        (ceil | f32x4_ceil) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_floor => {
        (floor | f32x4_floor) [1.0f32, 2., 2.5, 3.3],
        (floor | f32x4_floor) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_trunc => {
        (trunc | f32x4_trunc) [1.0f32, 2., 2.5, 3.3],
        (trunc | f32x4_trunc) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_nearest => {
        (round | f32x4_nearest) [1.0f32, 2., 2.6, 3.3],
        (round | f32x4_nearest) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_abs => {
        (abs | f32x4_abs) [1.0f32, 2., 2.6, 3.3],
        (abs | f32x4_abs) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_neg => {
        (neg | f32x4_neg) [1.0f32, 2., 2.6, 3.3],
        (neg | f32x4_neg) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_sqrt => {
        (sqrt | f32x4_sqrt) [1.0f32, 2., 2.6, 3.3],
        (sqrt | f32x4_sqrt) [0.0, 0.3, f32::INFINITY, 0.1],
    }

    test_f64x2_ceil => {
        (ceil | f64x2_ceil) [1.0f64, 2.3],
        (ceil | f64x2_ceil) [f64::INFINITY, -0.1],
    }

    test_f64x2_floor => {
        (floor | f64x2_floor) [1.0f64, 2.3],
        (floor | f64x2_floor) [f64::INFINITY, -0.1],
    }

    test_f64x2_trunc => {
        (trunc | f64x2_trunc) [1.0f64, 2.3],
        (trunc | f64x2_trunc) [f64::INFINITY, -0.1],
    }

    test_f64x2_nearest => {
        (round | f64x2_nearest) [1.0f64, 2.3],
        (round | f64x2_nearest) [f64::INFINITY, -0.1],
    }

    test_f64x2_abs => {
        (abs | f64x2_abs) [1.0f64, 2.3],
        (abs | f64x2_abs) [f64::INFINITY, -0.1],
    }

    test_f64x2_neg => {
        (neg | f64x2_neg) [1.0f64, 2.3],
        (neg | f64x2_neg) [f64::INFINITY, -0.1],
    }

    test_f64x2_sqrt => {
        (sqrt | f64x2_sqrt) [1.0f64, 2.3],
        (sqrt | f64x2_sqrt) [f64::INFINITY, 0.1],
    }
}
5329
/// Token-level type test used by `test_bop!`: expands to `true` for the
/// floating-point primitive idents `f32`/`f64`, and to `false` for every
/// other ident (the integer lane types).
macro_rules! floating_point {
    (f32) => { true };
    (f64) => { true };
    ($id:ident) => { false };
}
5341
/// Uniform `is_nan` lookup for lane element types, used by the
/// float-aware comparison path in `test_bop!`. Integer lanes can never
/// be NaN, so they take this always-`false` default; `f32`/`f64` values
/// resolve to their inherent `is_nan` methods instead.
trait IsNan: Sized {
    fn is_nan(self) -> bool {
        false
    }
}

// Mark every signed-integer lane type as never-NaN in one go.
macro_rules! impl_is_nan {
    ($($int:ty)*) => { $(impl IsNan for $int {})* };
}
impl_is_nan!(i8 i16 i32 i64);
5351
// Generates a `#[test]` that applies the binary SIMD intrinsic
// `$binary_op` to two hard-coded input vectors and compares the
// transmuted result against a hard-coded expected output array.
//
// Two arms:
// * `$id[$ety; $ecount] | op [name]: (...) => [...]` — output lanes have
//   the same element type as the inputs; forwards to the second arm.
// * `$id[$ety; $ecount] => $oty | op [name]: ...` — output lanes use a
//   distinct type `$oty` (the float comparisons produce integer masks).
macro_rules! test_bop {
    ($id:ident[$ety:ident; $ecount:expr] |
     $binary_op:ident [$op_test_id:ident] :
     ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
        // Same-type shorthand: reuse the two-type arm with `$oty = $ety`.
        test_bop!(
            $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
            ([$($in_a),*], [$($in_b),*]) => [$($out),*]
        );

    };
    ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
     $binary_op:ident [$op_test_id:ident] :
     ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
        #[test]
        fn $op_test_id() {
            unsafe {
                let a_input: [$ety; $ecount] = [$($in_a),*];
                let b_input: [$ety; $ecount] = [$($in_b),*];
                let output: [$oty; $ecount] = [$($out),*];

                let a_vec_in: v128 = transmute(a_input);
                let b_vec_in: v128 = transmute(b_input);
                let vec_res: v128 = $binary_op(a_vec_in, b_vec_in);

                let res: [$oty; $ecount] = transmute(vec_res);

                if !floating_point!($ety) {
                    // Integer lanes: exact comparison is always valid.
                    assert_eq!(res, output);
                } else {
                    // Float lanes: compare NaN-ness first, since
                    // `NaN != NaN` would make a direct `assert_eq!` fail.
                    for i in 0..$ecount {
                        let r = res[i];
                        let o = output[i];
                        assert_eq!(r.is_nan(), o.is_nan());
                        if !r.is_nan() {
                            assert_eq!(r, o);
                        }
                    }
                }
            }
        }
    }
}
5394
// Like `test_bop!`, but the second operand is a plain scalar rather than
// a vector — used for the shift intrinsics, where every lane shifts by
// the same amount.
macro_rules! test_bops {
    ($id:ident[$ety:ident; $ecount:expr] |
     $binary_op:ident [$op_test_id:ident]:
     ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
        #[test]
        fn $op_test_id() {
            unsafe {
                let a_input: [$ety; $ecount] = [$($in_a),*];
                let output: [$ety; $ecount] = [$($out),*];

                let a_vec_in: v128 = transmute(a_input);
                let vec_res: v128 = $binary_op(a_vec_in, $in_b);

                let res: [$ety; $ecount] = transmute(vec_res);
                assert_eq!(res, output);
            }
        }
    }
}
5414
// Generates a `#[test]` for a unary SIMD intrinsic: applies `$unary_op`
// to a hard-coded input vector and compares the transmuted result with
// the expected output array.
macro_rules! test_uop {
    ($id:ident[$ety:ident; $ecount:expr] |
     $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
        #[test]
        fn $op_test_id() {
            unsafe {
                let a_input: [$ety; $ecount] = [$($in_a),*];
                let output: [$ety; $ecount] = [$($out),*];

                let a_vec_in: v128 = transmute(a_input);
                let vec_res: v128 = $unary_op(a_vec_in);

                let res: [$ety; $ecount] = transmute(vec_res);
                assert_eq!(res, output);
            }
        }
    }
}
5433
// Left shifts: bits shifted past the top of the lane are discarded, so
// e.g. `i8::MAX << 1 == -2` in the 8-bit lanes.
test_bops!(i8x16[i8; 16] | i8x16_shl[i8x16_shl_test]:
           ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
           [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
test_bops!(i16x8[i16; 8] | i16x8_shl[i16x8_shl_test]:
           ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
           [0, -2, 4, 6, 8, 10, 12, -2]);
test_bops!(i32x4[i32; 4] | i32x4_shl[i32x4_shl_test]:
           ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
test_bops!(i64x2[i64; 2] | i64x2_shl[i64x2_shl_test]:
           ([0, -1], 1) => [0, -2]);
5444
// Arithmetic (sign-extending) right shifts: `-1 >> 1` stays `-1`.
test_bops!(i8x16[i8; 16] | i8x16_shr[i8x16_shr_s_test]:
           ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
           [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
test_bops!(i16x8[i16; 8] | i16x8_shr[i16x8_shr_s_test]:
           ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
           [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
test_bops!(i32x4[i32; 4] | i32x4_shr[i32x4_shr_s_test]:
           ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
test_bops!(i64x2[i64; 2] | i64x2_shr[i64x2_shr_s_test]:
           ([0, -1], 1) => [0, -1]);
5455
5456 test_bops!(i8x16[i8; 16] | u8x16_shr[i8x16_uhr_u_test]:
5457 ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
5458 [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
5459 test_bops!(i16x8[i16; 8] | u16x8_shr[i16x8_uhr_u_test]:
5460 ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
5461 [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
5462 test_bops!(i32x4[i32; 4] | u32x4_shr[i32x4_uhr_u_test]:
5463 ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
5464 test_bops!(i64x2[i64; 2] | u64x2_shr[i64x2_uhr_u_test]:
5465 ([0, -1], 1) => [0, i64::MAX]);
5466
/// Exercises the whole-vector bitwise ops (`and`, `andnot`, `or`, `not`,
/// `xor`, `bitselect`) over three byte patterns: half-set, all-ones, and
/// all-zeros.
#[test]
fn v128_bitwise_logical_ops() {
    unsafe {
        let mixed: v128 = transmute([u32::MAX, 0, u32::MAX, 0]);
        let ones: v128 = transmute([u32::MAX; 4]);
        let zeros: v128 = transmute([0u32; 4]);

        // AND is idempotent, and all-ones is its identity.
        compare_bytes(v128_and(mixed, mixed), mixed);
        compare_bytes(v128_and(mixed, ones), mixed);
        // ANDNOT computes `a & !b`.
        compare_bytes(v128_andnot(mixed, ones), zeros);
        compare_bytes(v128_andnot(mixed, mixed), zeros);
        compare_bytes(v128_andnot(mixed, zeros), mixed);
        // OR with all-ones saturates; NOT flips every bit; XOR with
        // all-zeros is the identity.
        compare_bytes(v128_or(mixed, ones), ones);
        compare_bytes(v128_not(ones), zeros);
        compare_bytes(v128_xor(mixed, zeros), mixed);

        // bitselect takes bits from the first operand where the mask is
        // set and from the second operand where it is clear.
        compare_bytes(v128_bitselect(ones, zeros, ones), ones);
        compare_bytes(v128_bitselect(ones, zeros, zeros), zeros);
        compare_bytes(v128_bitselect(ones, zeros, mixed), mixed);
    }
}
5503
// Generates a `#[test]` exercising a pair of boolean reductions for one
// lane width: `$any` (any-true) and `$all` (all-true), over three
// vectors — all lanes set, all lanes clear, and alternating lanes.
//
// Fix: the `$any` reduction was captured by the matcher but never
// asserted on, leaving the `v128_any_true` argument of every invocation
// untested; the any-true assertions are now generated as well.
macro_rules! test_bool_red {
    ([$test_id:ident, $any:ident, $all:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
        #[test]
        fn $test_id() {
            unsafe {
                let vec_a: v128 = transmute([$($true),*]); // all lanes set
                let vec_b: v128 = transmute([$($false),*]); // all lanes clear
                let vec_c: v128 = transmute([$($alt),*]); // alternating lanes

                // any-true: holds whenever at least one lane is non-zero.
                assert_eq!($any(vec_a), true);
                assert_eq!($any(vec_b), false);
                assert_eq!($any(vec_c), true);

                // all-true: holds only when every lane is non-zero.
                assert_eq!($all(vec_a), true);
                assert_eq!($all(vec_b), false);
                assert_eq!($all(vec_c), false);
            }
        }
    }
}
5525
// Boolean reductions for each lane width: an all-true vector, an
// all-false vector, and an alternating pattern (any-true but not
// all-true).
test_bool_red!(
    [i8x16_boolean_reductions, v128_any_true, i8x16_all_true]
    | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
);
test_bool_red!(
    [i16x8_boolean_reductions, v128_any_true, i16x8_all_true]
    | [1_i16, 1, 1, 1, 1, 1, 1, 1]
    | [0_i16, 0, 0, 0, 0, 0, 0, 0]
    | [1_i16, 0, 1, 0, 1, 0, 1, 0]
);
test_bool_red!(
    [i32x4_boolean_reductions, v128_any_true, i32x4_all_true]
    | [1_i32, 1, 1, 1]
    | [0_i32, 0, 0, 0]
    | [1_i32, 0, 1, 0]
);
test_bool_red!(
    [i64x2_boolean_reductions, v128_any_true, i64x2_all_true]
    | [1_i64, 1]
    | [0_i64, 0]
    | [1_i64, 0]
);
5550
// Lane-wise equality / inequality: matching lanes yield an all-ones mask
// (-1), non-matching lanes yield 0. The float variants use the
// `=> i32` / `=> i64` form of `test_bop!` because they produce integer
// masks from float inputs.
test_bop!(i8x16[i8; 16] | i8x16_eq[i8x16_eq_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i16x8[i16; 8] | i16x8_eq[i16x8_eq_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i32x4[i32; 4] | i32x4_eq[i32x4_eq_test]:
          ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
test_bop!(i64x2[i64; 2] | i64x2_eq[i64x2_eq_test]:
          ([0, 1], [0, 2]) => [-1, 0]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_eq[f32x4_eq_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);

test_bop!(i8x16[i8; 16] | i8x16_ne[i8x16_ne_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i16x8[i16; 8] | i16x8_ne[i16x8_ne_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i32x4[i32; 4] | i32x4_ne[i32x4_ne_test]:
          ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
test_bop!(i64x2[i64; 2] | i64x2_ne[i64x2_ne_test]:
          ([0, 1], [0, 2]) => [0, -1]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_ne[f32x4_ne_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);
5580
// Lane-wise less-than / greater-than, in both signed and unsigned
// flavors. The negative sentinel lanes (-12, -7, etc.) are where the two
// flavors diverge: signed treats them as small, unsigned as huge.
test_bop!(i8x16[i8; 16] | i8x16_lt[i8x16_lt_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0]);
test_bop!(i8x16[i8; 16] | u8x16_lt[i8x16_lt_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i16x8[i16; 8] | i16x8_lt[i16x8_lt_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, -1]);
test_bop!(i16x8[i16; 8] | u16x8_lt[i16x8_lt_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i32x4[i32; 4] | i32x4_lt[i32x4_lt_s_test]:
          ([-1, 1, 2, 3], [0, 2, 2, 4]) => [-1, -1, 0, -1]);
test_bop!(i32x4[i32; 4] | u32x4_lt[i32x4_lt_u_test]:
          ([-1, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
test_bop!(i64x2[i64; 2] | i64x2_lt[i64x2_lt_s_test]:
          ([-1, 3], [0, 2]) => [-1, 0]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_lt[f32x4_lt_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);

test_bop!(i8x16[i8; 16] | i8x16_gt[i8x16_gt_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i8x16[i8; 16] | u8x16_gt[i8x16_gt_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, -1]);
test_bop!(i16x8[i16; 8] | i16x8_gt[i16x8_gt_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i16x8[i16; 8] | u16x8_gt[i16x8_gt_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, -1]);
test_bop!(i32x4[i32; 4] | i32x4_gt[i32x4_gt_s_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, 0]);
test_bop!(i32x4[i32; 4] | u32x4_gt[i32x4_gt_u_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
test_bop!(i64x2[i64; 2] | i64x2_gt[i64x2_gt_s_test]:
          ([-1, 2], [0, 1]) => [0, -1]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_gt[f32x4_gt_test]:
          ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);
5628
// Lane-wise greater-or-equal / less-or-equal, signed and unsigned. As
// above, the negative lanes distinguish the two flavors.
test_bop!(i8x16[i8; 16] | i8x16_ge[i8x16_ge_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i8x16[i8; 16] | u8x16_ge[i8x16_ge_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i16x8[i16; 8] | i16x8_ge[i16x8_ge_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i16x8[i16; 8] | u16x8_ge[i16x8_ge_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i32x4[i32; 4] | i32x4_ge[i32x4_ge_s_test]:
          ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
test_bop!(i32x4[i32; 4] | u32x4_ge[i32x4_ge_u_test]:
          ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, -1]);
test_bop!(i64x2[i64; 2] | i64x2_ge[i64x2_ge_s_test]:
          ([0, 1], [-1, 2]) => [-1, 0]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_ge[f32x4_ge_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);

test_bop!(i8x16[i8; 16] | i8x16_le[i8x16_le_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
          ) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i8x16[i8; 16] | u8x16_le[i8x16_le_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
          ) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i16x8[i16; 8] | i16x8_le[i16x8_le_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i16x8[i16; 8] | u16x8_le[i16x8_le_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i32x4[i32; 4] | i32x4_le[i32x4_le_s_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, -1]);
test_bop!(i32x4[i32; 4] | u32x4_le[i32x4_le_u_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
test_bop!(i64x2[i64; 2] | i64x2_le[i64x2_le_s_test]:
          ([0, 2], [0, 1]) => [-1, 0]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_le[f32x4_le_test]:
          ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, -0]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);
5678
// f32x4 arithmetic against precomputed expected lanes. The `_nan`
// variants check NaN propagation through min/max (NaN lanes compared via
// `is_nan` inside `test_bop!`).
test_uop!(f32x4[f32; 4] | f32x4_neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
test_uop!(f32x4[f32; 4] | f32x4_abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test_nan]:
          ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
          => [0., -3., -4., f32::NAN]);
test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test_nan]:
          ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
          => [1., -1., 7., f32::NAN]);
test_bop!(f32x4[f32; 4] | f32x4_add[f32x4_add_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
test_bop!(f32x4[f32; 4] | f32x4_sub[f32x4_sub_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
test_bop!(f32x4[f32; 4] | f32x4_mul[f32x4_mul_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
test_bop!(f32x4[f32; 4] | f32x4_div[f32x4_div_test]:
          ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
5699
// f64x2 arithmetic, mirroring the f32x4 cases above including NaN
// propagation through min/max.
test_uop!(f64x2[f64; 2] | f64x2_neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
test_uop!(f64x2[f64; 2] | f64x2_abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test]:
          ([0., -1.], [1., -3.]) => [0., -3.]);
test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test_nan]:
          ([7., 8.], [-4., f64::NAN])
          => [ -4., f64::NAN]);
test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test]:
          ([0., -1.], [1., -3.]) => [1., -1.]);
test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test_nan]:
          ([7., 8.], [ -4., f64::NAN])
          => [7., f64::NAN]);
test_bop!(f64x2[f64; 2] | f64x2_add[f64x2_add_test]:
          ([0., -1.], [1., -3.]) => [1., -4.]);
test_bop!(f64x2[f64; 2] | f64x2_sub[f64x2_sub_test]:
          ([0., -1.], [1., -3.]) => [-1., 2.]);
test_bop!(f64x2[f64; 2] | f64x2_mul[f64x2_mul_test]:
          ([0., -1.], [1., -3.]) => [0., 3.]);
test_bop!(f64x2[f64; 2] | f64x2_div[f64x2_div_test]:
          ([0., -8.], [1., 4.]) => [0., -2.]);
5720
// Generates a `#[test]` for a lane-wise conversion intrinsic: applying
// `$conv_id` to the bytes of `$from` must produce exactly the bytes of
// `$to` (byte comparison, so float results must match bit-for-bit).
macro_rules! test_conv {
    ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr, $to:expr) => {
        #[test]
        fn $test_id() {
            unsafe {
                let from: v128 = transmute($from);
                let to: v128 = transmute($to);

                let r: v128 = $conv_id(from);

                compare_bytes(r, to);
            }
        }
    };
}
5736
// int -> float conversions; the unsigned case checks that `u32::MAX`
// converts as a large positive value rather than -1.
test_conv!(
    f32x4_convert_s_i32x4 | f32x4_convert_i32x4 | f32x4 | [1_i32, 2, 3, 4],
    [1_f32, 2., 3., 4.]
);
test_conv!(
    f32x4_convert_u_i32x4 | f32x4_convert_u32x4 | f32x4 | [u32::MAX, 2, 3, 4],
    [u32::MAX as f32, 2., 3., 4.]
);
5745
#[test]
fn test_conversions() {
    // Saturating float -> int truncation: the expected values encode the
    // saturation rules asserted here — -inf clamps to the type minimum
    // (0 for unsigned), +inf clamps to the maximum, NaN becomes 0.
    compare_bytes(
        i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
        i32x4(1, i32::MIN, i32::MAX, 0),
    );
    compare_bytes(
        u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
        u32x4(1, 0, u32::MAX, 0),
    );
    // `convert_low` uses only the low two i32/u32 lanes; lanes 2 and 3
    // are ignored.
    compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.));
    compare_bytes(
        f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)),
        f64x2(f64::from(i32::MIN), f64::from(i32::MAX)),
    );
    compare_bytes(f64x2_convert_low_u32x4(u32x4(1, 2, 3, 4)), f64x2(1., 2.));
    compare_bytes(
        f64x2_convert_low_u32x4(u32x4(u32::MIN, u32::MAX, 3, 4)),
        f64x2(f64::from(u32::MIN), f64::from(u32::MAX)),
    );

    // `_zero` variants truncate the two f64 lanes into the low two
    // output lanes and zero the upper two.
    compare_bytes(
        i32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
        i32x4(1, i32::MIN, 0, 0),
    );
    compare_bytes(
        i32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
        i32x4(0, i32::MAX, 0, 0),
    );
    compare_bytes(
        u32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
        u32x4(1, 0, 0, 0),
    );
    compare_bytes(
        u32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
        u32x4(0, u32::MAX, 0, 0),
    );
}
5784
#[test]
fn test_popcnt() {
    // Checks `i8x16_popcnt` (per-lane population count) two ways:
    // splatted single bytes, then heterogeneous vectors.
    unsafe {
        // Every possible byte value, splatted into all 16 lanes; each
        // lane must report that byte's number of set bits.
        for i in 0..=255u8 {
            compare_bytes(
                i8x16_popcnt(u8x16_splat(i)),
                u8x16_splat(i.count_ones() as u8),
            )
        }

        let vectors = [
            [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
            [
                100, 200, 50, 0, 10, 7, 38, 185, 192, 3, 34, 85, 93, 7, 31, 99,
            ],
        ];

        // Compute the expected lane counts with `array::map` instead of
        // spelling out all sixteen `count_ones` calls by hand.
        for vector in vectors.iter() {
            let expected = vector.map(|byte| byte.count_ones() as i8);
            compare_bytes(i8x16_popcnt(transmute(*vector)), transmute(expected));
        }
    }
}
5827
/// Round-trips f64 pairs through `f32x4_demote_f64x2_zero` (demote the
/// two f64 lanes into the low two f32 lanes, zero the rest) and
/// `f64x2_promote_low_f32x4` (promote the low two f32 lanes back to
/// f64), comparing raw bytes in both directions.
#[test]
fn test_promote_demote() {
    let cases: [[f64; 2]; 5] = [
        [1., 2.],
        [f64::NAN, f64::INFINITY],
        [100., 201.],
        [0., -0.],
        [f64::NEG_INFINITY, 0.],
    ];

    for &[lo, hi] in cases.iter() {
        compare_bytes(
            f32x4_demote_f64x2_zero(f64x2(lo, hi)),
            f32x4(lo as f32, hi as f32, 0., 0.),
        );
        compare_bytes(
            f64x2_promote_low_f32x4(f32x4(lo as f32, hi as f32, 0., 0.)),
            f64x2(lo, hi),
        );
    }
}
5849
#[test]
fn test_extmul() {
    // For each lane width, checks the extending-multiply pairs: `low`
    // widens and multiplies the lower half of the lanes, `high` does the
    // upper half. The expected result is computed lane-by-lane with a
    // scalar widening cast plus `wrapping_mul`.
    macro_rules! test {
        ($(
            $ctor:ident {
                from: $from:ident,
                to: $to:ident,
                low: $low:ident,
                high: $high:ident,
            } => {
                $(([$($a:tt)*] * [$($b:tt)*]))*
            }
        )*) => ($(
            $(unsafe {
                // Lane token lists are reused both as arrays (for the
                // scalar reference) and as ctor arguments (for the
                // vectors under test).
                let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                let b: [$from; 16 / mem::size_of::<$from>()] = [$($b)*];
                let low = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($low($ctor($($a)*), $ctor($($b)*)));
                let high = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($high($ctor($($a)*), $ctor($($b)*)));

                let half = a.len() / 2;
                for i in 0..half {
                    assert_eq!(
                        (a[i] as $to).wrapping_mul((b[i] as $to)),
                        low[i],
                        "expected {} * {}", a[i] as $to, b[i] as $to,
                    );
                    assert_eq!(
                        (a[half + i] as $to).wrapping_mul((b[half + i] as $to)),
                        high[i],
                        "expected {} * {}", a[half + i] as $to, b[half + i] as $to,
                    );
                }
            })*
        )*)
    }
    test! {
        i8x16 {
            from: i8,
            to: i16,
            low: i16x8_extmul_low_i8x16,
            high: i16x8_extmul_high_i8x16,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                    *
                [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
            )
        }
        u8x16 {
            from: u8,
            to: u16,
            low: u16x8_extmul_low_u8x16,
            high: u16x8_extmul_high_u8x16,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                    *
                [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
            )
        }
        i16x8 {
            from: i16,
            to: i32,
            low: i32x4_extmul_low_i16x8,
            high: i32x4_extmul_high_i16x8,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                    *
                [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
            )
        }
        u16x8 {
            from: u16,
            to: u32,
            low: u32x4_extmul_low_u16x8,
            high: u32x4_extmul_high_u16x8,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                    *
                [1, 1, 3, 29391, 105, 2, 100, 2]
            )
        }
        i32x4 {
            from: i32,
            to: i64,
            low: i64x2_extmul_low_i32x4,
            high: i64x2_extmul_high_i32x4,
        } => {
            (
                [0, 0, 0, 0]
                    *
                [0, 0, 0, 0]
            )
            (
                [-1, 0, i32::MAX, 19931]
                    *
                [1, 1, i32::MIN, 29391]
            )
            (
                [i32::MAX, 3003183, 3 << 20, 0xffffff]
                    *
                [i32::MAX, i32::MIN, -40042, 300]
            )
        }
        u32x4 {
            from: u32,
            to: u64,
            low: u64x2_extmul_low_u32x4,
            high: u64x2_extmul_high_u32x4,
        } => {
            (
                [0, 0, 0, 0]
                    *
                [0, 0, 0, 0]
            )
            (
                [1, 0, u32::MAX, 19931]
                    *
                [1, 1, 3, 29391]
            )
            (
                [u32::MAX, 3003183, 3 << 20, 0xffffff]
                    *
                [u32::MAX, 3000, 40042, 300]
            )
        }
    }
}
6000
6001 #[test]
6002 fn test_q15mulr_sat_s() {
6003 fn test(a: [i16; 8], b: [i16; 8]) {
6004 let a_v = i16x8(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
6005 let b_v = i16x8(b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
6006 let result = i16x8_q15mulr_sat(a_v, b_v);
6007 let result = unsafe { mem::transmute::<v128, [i16; 8]>(result) };
6008
6009 for (i, (a, b)) in a.iter().zip(&b).enumerate() {
6010 assert_eq!(
6011 result[i],
6012 (((*a as i32) * (*b as i32) + 0x4000) >> 15) as i16
6013 );
6014 }
6015 }
6016
6017 test([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]);
6018 test([1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]);
6019 test(
6020 [-1, 100, 2003, -29494, 12, 128, 994, 1],
6021 [-4049, 8494, -10483, 0, 5, 2222, 883, -9],
6022 );
6023 }
6024
6025 #[test]
6026 fn test_extadd() {
6027 macro_rules! test {
6028 ($(
6029 $func:ident {
6030 from: $from:ident,
6031 to: $to:ident,
6032 } => {
6033 $([$($a:tt)*])*
6034 }
6035 )*) => ($(
6036 $(unsafe {
6037 let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
6038 let a_v = mem::transmute::<_, v128>(a);
6039 let r = mem::transmute::<v128, [$to; 16 / mem::size_of::<$to>()]>($func(a_v));
6040
6041 let half = a.len() / 2;
6042 for i in 0..half {
6043 assert_eq!(
6044 (a[2 * i] as $to).wrapping_add((a[2 * i + 1] as $to)),
6045 r[i],
6046 "failed {} + {} != {}",
6047 a[2 * i] as $to,
6048 a[2 * i + 1] as $to,
6049 r[i],
6050 );
6051 }
6052 })*
6053 )*)
6054 }
6055 test! {
6056 i16x8_extadd_pairwise_i8x16 {
6057 from: i8,
6058 to: i16,
6059 } => {
6060 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
6061 [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
6062 [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
6063 }
6064 i16x8_extadd_pairwise_u8x16 {
6065 from: u8,
6066 to: i16,
6067 } => {
6068 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
6069 [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
6070 [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
6071 }
6072 i32x4_extadd_pairwise_i16x8 {
6073 from: i16,
6074 to: i32,
6075 } => {
6076 [0, 0, 0, 0, 0, 0, 0, 0]
6077 [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
6078 [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
6079 }
6080 i32x4_extadd_pairwise_u16x8 {
6081 from: u16,
6082 to: i32,
6083 } => {
6084 [0, 0, 0, 0, 0, 0, 0, 0]
6085 [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
6086 [1, 1, 3, 29391, 105, 2, 100, 2]
6087 }
6088 }
6089 }
6090}