Skip to content

Commit 5520a91

Browse files
TDecking authored and Amanieu committed
Use generic simd in wasm intrinsics
1 parent 2227b97 commit 5520a91

File tree

1 file changed

+17
-47
lines changed

1 file changed

+17
-47
lines changed

crates/core_arch/src/wasm32/simd128.rs

Lines changed: 17 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -84,18 +84,12 @@ extern "C" {
8484

8585
#[link_name = "llvm.wasm.alltrue.v16i8"]
8686
fn llvm_i8x16_all_true(x: simd::i8x16) -> i32;
87-
#[link_name = "llvm.ctpop.v16i8"]
88-
fn llvm_popcnt(a: simd::i8x16) -> simd::i8x16;
8987
#[link_name = "llvm.wasm.bitmask.v16i8"]
9088
fn llvm_bitmask_i8x16(a: simd::i8x16) -> i32;
9189
#[link_name = "llvm.wasm.narrow.signed.v16i8.v8i16"]
9290
fn llvm_narrow_i8x16_s(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
9391
#[link_name = "llvm.wasm.narrow.unsigned.v16i8.v8i16"]
9492
fn llvm_narrow_i8x16_u(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
95-
#[link_name = "llvm.sadd.sat.v16i8"]
96-
fn llvm_i8x16_add_sat_s(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
97-
#[link_name = "llvm.uadd.sat.v16i8"]
98-
fn llvm_i8x16_add_sat_u(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
9993
#[link_name = "llvm.wasm.sub.sat.signed.v16i8"]
10094
fn llvm_i8x16_sub_sat_s(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
10195
#[link_name = "llvm.wasm.sub.sat.unsigned.v16i8"]
@@ -117,10 +111,6 @@ extern "C" {
117111
fn llvm_narrow_i16x8_s(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
118112
#[link_name = "llvm.wasm.narrow.unsigned.v8i16.v4i32"]
119113
fn llvm_narrow_i16x8_u(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
120-
#[link_name = "llvm.sadd.sat.v8i16"]
121-
fn llvm_i16x8_add_sat_s(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
122-
#[link_name = "llvm.uadd.sat.v8i16"]
123-
fn llvm_i16x8_add_sat_u(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
124114
#[link_name = "llvm.wasm.sub.sat.signed.v8i16"]
125115
fn llvm_i16x8_sub_sat_s(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
126116
#[link_name = "llvm.wasm.sub.sat.unsigned.v8i16"]
@@ -144,35 +134,15 @@ extern "C" {
144134
#[link_name = "llvm.wasm.bitmask.v2i64"]
145135
fn llvm_bitmask_i64x2(a: simd::i64x2) -> i32;
146136

147-
#[link_name = "llvm.ceil.v4f32"]
148-
fn llvm_f32x4_ceil(x: simd::f32x4) -> simd::f32x4;
149-
#[link_name = "llvm.floor.v4f32"]
150-
fn llvm_f32x4_floor(x: simd::f32x4) -> simd::f32x4;
151-
#[link_name = "llvm.trunc.v4f32"]
152-
fn llvm_f32x4_trunc(x: simd::f32x4) -> simd::f32x4;
153137
#[link_name = "llvm.nearbyint.v4f32"]
154138
fn llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4;
155-
#[link_name = "llvm.fabs.v4f32"]
156-
fn llvm_f32x4_abs(x: simd::f32x4) -> simd::f32x4;
157-
#[link_name = "llvm.sqrt.v4f32"]
158-
fn llvm_f32x4_sqrt(x: simd::f32x4) -> simd::f32x4;
159139
#[link_name = "llvm.minimum.v4f32"]
160140
fn llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
161141
#[link_name = "llvm.maximum.v4f32"]
162142
fn llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
163143

164-
#[link_name = "llvm.ceil.v2f64"]
165-
fn llvm_f64x2_ceil(x: simd::f64x2) -> simd::f64x2;
166-
#[link_name = "llvm.floor.v2f64"]
167-
fn llvm_f64x2_floor(x: simd::f64x2) -> simd::f64x2;
168-
#[link_name = "llvm.trunc.v2f64"]
169-
fn llvm_f64x2_trunc(x: simd::f64x2) -> simd::f64x2;
170144
#[link_name = "llvm.nearbyint.v2f64"]
171145
fn llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2;
172-
#[link_name = "llvm.fabs.v2f64"]
173-
fn llvm_f64x2_abs(x: simd::f64x2) -> simd::f64x2;
174-
#[link_name = "llvm.sqrt.v2f64"]
175-
fn llvm_f64x2_sqrt(x: simd::f64x2) -> simd::f64x2;
176146
#[link_name = "llvm.minimum.v2f64"]
177147
fn llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
178148
#[link_name = "llvm.maximum.v2f64"]
@@ -2284,7 +2254,7 @@ pub fn i8x16_neg(a: v128) -> v128 {
22842254
#[doc(alias("i8x16.popcnt"))]
22852255
#[stable(feature = "wasm_simd", since = "1.54.0")]
22862256
pub fn i8x16_popcnt(v: v128) -> v128 {
2287-
unsafe { llvm_popcnt(v.as_i8x16()).v128() }
2257+
unsafe { simd_ctpop(v.as_i8x16()).v128() }
22882258
}
22892259

22902260
#[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -2410,7 +2380,7 @@ pub use i8x16_add as u8x16_add;
24102380
#[doc(alias("i8x16.add_sat_s"))]
24112381
#[stable(feature = "wasm_simd", since = "1.54.0")]
24122382
pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
2413-
unsafe { llvm_i8x16_add_sat_s(a.as_i8x16(), b.as_i8x16()).v128() }
2383+
unsafe { simd_saturating_add(a.as_i8x16(), b.as_i8x16()).v128() }
24142384
}
24152385

24162386
/// Adds two 128-bit vectors as if they were two packed sixteen 8-bit unsigned
@@ -2421,7 +2391,7 @@ pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
24212391
#[doc(alias("i8x16.add_sat_u"))]
24222392
#[stable(feature = "wasm_simd", since = "1.54.0")]
24232393
pub fn u8x16_add_sat(a: v128, b: v128) -> v128 {
2424-
unsafe { llvm_i8x16_add_sat_u(a.as_i8x16(), b.as_i8x16()).v128() }
2394+
unsafe { simd_saturating_add(a.as_u8x16(), b.as_u8x16()).v128() }
24252395
}
24262396

24272397
/// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit integers.
@@ -2778,7 +2748,7 @@ pub use i16x8_add as u16x8_add;
27782748
#[doc(alias("i16x8.add_sat_s"))]
27792749
#[stable(feature = "wasm_simd", since = "1.54.0")]
27802750
pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
2781-
unsafe { llvm_i16x8_add_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
2751+
unsafe { simd_saturating_add(a.as_i16x8(), b.as_i16x8()).v128() }
27822752
}
27832753

27842754
/// Adds two 128-bit vectors as if they were two packed eight 16-bit unsigned
@@ -2789,7 +2759,7 @@ pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
27892759
#[doc(alias("i16x8.add_sat_u"))]
27902760
#[stable(feature = "wasm_simd", since = "1.54.0")]
27912761
pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
2792-
unsafe { llvm_i16x8_add_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
2762+
unsafe { simd_saturating_add(a.as_u16x8(), b.as_u16x8()).v128() }
27932763
}
27942764

27952765
/// Subtracts two 128-bit vectors as if they were two packed eight 16-bit integers.
@@ -3725,7 +3695,7 @@ pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
37253695
#[doc(alias("f32x4.ceil"))]
37263696
#[stable(feature = "wasm_simd", since = "1.54.0")]
37273697
pub fn f32x4_ceil(a: v128) -> v128 {
3728-
unsafe { llvm_f32x4_ceil(a.as_f32x4()).v128() }
3698+
unsafe { simd_ceil(a.as_f32x4()).v128() }
37293699
}
37303700

37313701
/// Lane-wise rounding to the nearest integral value not greater than the input.
@@ -3735,7 +3705,7 @@ pub fn f32x4_ceil(a: v128) -> v128 {
37353705
#[doc(alias("f32x4.floor"))]
37363706
#[stable(feature = "wasm_simd", since = "1.54.0")]
37373707
pub fn f32x4_floor(a: v128) -> v128 {
3738-
unsafe { llvm_f32x4_floor(a.as_f32x4()).v128() }
3708+
unsafe { simd_floor(a.as_f32x4()).v128() }
37393709
}
37403710

37413711
/// Lane-wise rounding to the nearest integral value with the magnitude not
@@ -3746,7 +3716,7 @@ pub fn f32x4_floor(a: v128) -> v128 {
37463716
#[doc(alias("f32x4.trunc"))]
37473717
#[stable(feature = "wasm_simd", since = "1.54.0")]
37483718
pub fn f32x4_trunc(a: v128) -> v128 {
3749-
unsafe { llvm_f32x4_trunc(a.as_f32x4()).v128() }
3719+
unsafe { simd_trunc(a.as_f32x4()).v128() }
37503720
}
37513721

37523722
/// Lane-wise rounding to the nearest integral value; if two values are equally
@@ -3768,7 +3738,7 @@ pub fn f32x4_nearest(a: v128) -> v128 {
37683738
#[doc(alias("f32x4.abs"))]
37693739
#[stable(feature = "wasm_simd", since = "1.54.0")]
37703740
pub fn f32x4_abs(a: v128) -> v128 {
3771-
unsafe { llvm_f32x4_abs(a.as_f32x4()).v128() }
3741+
unsafe { simd_fabs(a.as_f32x4()).v128() }
37723742
}
37733743

37743744
/// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
@@ -3779,7 +3749,7 @@ pub fn f32x4_abs(a: v128) -> v128 {
37793749
#[doc(alias("f32x4.neg"))]
37803750
#[stable(feature = "wasm_simd", since = "1.54.0")]
37813751
pub fn f32x4_neg(a: v128) -> v128 {
3782-
f32x4_mul(a, f32x4_splat(-1.))
3752+
unsafe { simd_neg(a.as_f32x4()).v128() }
37833753
}
37843754

37853755
/// Calculates the square root of each lane of a 128-bit vector interpreted as
@@ -3790,7 +3760,7 @@ pub fn f32x4_neg(a: v128) -> v128 {
37903760
#[doc(alias("f32x4.sqrt"))]
37913761
#[stable(feature = "wasm_simd", since = "1.54.0")]
37923762
pub fn f32x4_sqrt(a: v128) -> v128 {
3793-
unsafe { llvm_f32x4_sqrt(a.as_f32x4()).v128() }
3763+
unsafe { simd_fsqrt(a.as_f32x4()).v128() }
37943764
}
37953765

37963766
/// Lane-wise addition of two 128-bit vectors interpreted as four 32-bit
@@ -3900,7 +3870,7 @@ pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
39003870
#[doc(alias("f64x2.ceil"))]
39013871
#[stable(feature = "wasm_simd", since = "1.54.0")]
39023872
pub fn f64x2_ceil(a: v128) -> v128 {
3903-
unsafe { llvm_f64x2_ceil(a.as_f64x2()).v128() }
3873+
unsafe { simd_ceil(a.as_f64x2()).v128() }
39043874
}
39053875

39063876
/// Lane-wise rounding to the nearest integral value not greater than the input.
@@ -3910,7 +3880,7 @@ pub fn f64x2_ceil(a: v128) -> v128 {
39103880
#[doc(alias("f64x2.floor"))]
39113881
#[stable(feature = "wasm_simd", since = "1.54.0")]
39123882
pub fn f64x2_floor(a: v128) -> v128 {
3913-
unsafe { llvm_f64x2_floor(a.as_f64x2()).v128() }
3883+
unsafe { simd_floor(a.as_f64x2()).v128() }
39143884
}
39153885

39163886
/// Lane-wise rounding to the nearest integral value with the magnitude not
@@ -3921,7 +3891,7 @@ pub fn f64x2_floor(a: v128) -> v128 {
39213891
#[doc(alias("f64x2.trunc"))]
39223892
#[stable(feature = "wasm_simd", since = "1.54.0")]
39233893
pub fn f64x2_trunc(a: v128) -> v128 {
3924-
unsafe { llvm_f64x2_trunc(a.as_f64x2()).v128() }
3894+
unsafe { simd_trunc(a.as_f64x2()).v128() }
39253895
}
39263896

39273897
/// Lane-wise rounding to the nearest integral value; if two values are equally
@@ -3943,7 +3913,7 @@ pub fn f64x2_nearest(a: v128) -> v128 {
39433913
#[doc(alias("f64x2.abs"))]
39443914
#[stable(feature = "wasm_simd", since = "1.54.0")]
39453915
pub fn f64x2_abs(a: v128) -> v128 {
3946-
unsafe { llvm_f64x2_abs(a.as_f64x2()).v128() }
3916+
unsafe { simd_fabs(a.as_f64x2()).v128() }
39473917
}
39483918

39493919
/// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
@@ -3954,7 +3924,7 @@ pub fn f64x2_abs(a: v128) -> v128 {
39543924
#[doc(alias("f64x2.neg"))]
39553925
#[stable(feature = "wasm_simd", since = "1.54.0")]
39563926
pub fn f64x2_neg(a: v128) -> v128 {
3957-
f64x2_mul(a, f64x2_splat(-1.0))
3927+
unsafe { simd_neg(a.as_f64x2()).v128() }
39583928
}
39593929

39603930
/// Calculates the square root of each lane of a 128-bit vector interpreted as
@@ -3965,7 +3935,7 @@ pub fn f64x2_neg(a: v128) -> v128 {
39653935
#[doc(alias("f64x2.sqrt"))]
39663936
#[stable(feature = "wasm_simd", since = "1.54.0")]
39673937
pub fn f64x2_sqrt(a: v128) -> v128 {
3968-
unsafe { llvm_f64x2_sqrt(a.as_f64x2()).v128() }
3938+
unsafe { simd_fsqrt(a.as_f64x2()).v128() }
39693939
}
39703940

39713941
/// Lane-wise add of two 128-bit vectors interpreted as two 64-bit

0 commit comments

Comments (0)