@@ -23,10 +23,10 @@ use stdarch_test::assert_instr;
 types! {
     /// ARM-specific 64-bit wide vector of one packed `f64`.
     #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-    pub struct float64x1_t(f64); // FIXME: check this!
+    pub struct float64x1_t(1 x f64); // FIXME: check this!
     /// ARM-specific 128-bit wide vector of two packed `f64`.
     #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-    pub struct float64x2_t(f64, f64);
+    pub struct float64x2_t(2 x f64);
 }

 /// ARM-specific type containing two `float64x1_t` vectors.
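The `N x f64` form feeds stdarch's `types!` macro; the macro's expansion is not part of this hunk, so the snippet below is only a simplified, hypothetical stand-in for the shape the rest of the diff relies on: an array-backed newtype with a `splat` constructor. The name `F64x2` is illustrative, and the real types also carry `#[repr(simd)]`, which is nightly-only and omitted here so the sketch compiles on stable.

```rust
// Hypothetical stand-in for what `pub struct float64x2_t(2 x f64)` suggests:
// a newtype over a fixed-size array plus a `splat` constructor. Names are
// illustrative; the real expansion lives in stdarch's `types!` macro.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct F64x2(pub [f64; 2]);

impl F64x2 {
    /// Set every lane to `value`: the constructor the `+` lines rely on
    /// later in this diff.
    pub const fn splat(value: f64) -> Self {
        F64x2([value; 2])
    }
}

fn main() {
    // Both construction styles that appear in this commit: splat and an
    // explicit per-lane array.
    assert_eq!(F64x2::splat(1.5), F64x2([1.5, 1.5]));
}
```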
@@ -1061,7 +1061,7 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t {
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
-    let not = int64x1_t(-1);
+    let not = int64x1_t::splat(-1);
     transmute(simd_or(
         simd_and(a, transmute(b)),
         simd_and(simd_xor(a, transmute(not)), transmute(c)),
@@ -1073,7 +1073,7 @@ pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t {
-    let not = int64x1_t(-1);
+    let not = int64x1_t::splat(-1);
     simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c))
 }
 /// Bitwise Select. (128-bit)
@@ -1082,7 +1082,7 @@ pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
-    let not = int64x2_t(-1, -1);
+    let not = int64x2_t::splat(-1);
     transmute(simd_or(
         simd_and(a, transmute(b)),
         simd_and(simd_xor(a, transmute(not)), transmute(c)),
@@ -1094,7 +1094,7 @@ pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float6
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t {
-    let not = int64x2_t(-1, -1);
+    let not = int64x2_t::splat(-1);
     simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c))
 }

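All four `vbsl*` bodies compute the same select identity, `(mask & b) | (!mask & c)`; the only change is that the all-ones constant used to complement the mask is now built with `splat(-1)` instead of listing `-1` once per lane (in two's complement, `-1` is all ones, so splatting it yields an all-ones vector of any width). Below is a plain-integer model of that identity, with a hypothetical `bsl_u64` helper standing in for the vector code.

```rust
// Scalar model of Bitwise Select: each result bit comes from `b` where the
// mask bit is 1 and from `c` where it is 0. XOR-ing with an all-ones value
// (here `-1i64` reinterpreted as u64) complements the mask, mirroring
// `simd_xor(a, transmute(not))` in the intrinsics above.
fn bsl_u64(mask: u64, b: u64, c: u64) -> u64 {
    let not_mask = mask ^ (-1i64 as u64); // same as !mask
    (mask & b) | (not_mask & c)
}

fn main() {
    let mask = 0xFFFF_0000_FFFF_0000_u64;
    let b = 0xAAAA_AAAA_AAAA_AAAA_u64;
    let c = 0x5555_5555_5555_5555_u64;
    assert_eq!(bsl_u64(mask, b, c), 0xAAAA_5555_AAAA_5555);
}
```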
@@ -1976,7 +1976,7 @@ pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t {
 #[cfg_attr(test, assert_instr(nop))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t {
-    float64x1_t(value)
+    float64x1_t::splat(value)
 }

 /// Duplicate vector element to vector or scalar
@@ -1994,7 +1994,7 @@ pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t {
 #[cfg_attr(test, assert_instr(dup))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t {
-    float64x2_t(value, value)
+    float64x2_t::splat(value)
 }

 /// Duplicate vector element to vector or scalar
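Callers are unaffected by the switch to `splat`: `vdup_n_f64` and `vdupq_n_f64` still broadcast a scalar into every lane. A small usage check follows, assuming it is built for an AArch64 target; it uses the public `core::arch::aarch64` intrinsics, not the private constructors above.

```rust
// Broadcast a scalar with vdupq_n_f64 and read both lanes back.
#[cfg(target_arch = "aarch64")]
fn main() {
    use core::arch::aarch64::{vdupq_n_f64, vgetq_lane_f64};
    // SAFETY: NEON is mandatory on AArch64, so these intrinsics are available.
    unsafe {
        let v = vdupq_n_f64(3.5);
        assert_eq!(vgetq_lane_f64::<0>(v), 3.5);
        assert_eq!(vgetq_lane_f64::<1>(v), 3.5);
    }
}

#[cfg(not(target_arch = "aarch64"))]
fn main() {}
```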
@@ -2040,7 +2040,7 @@ pub unsafe fn vmovq_n_f64(value: f64) -> float64x2_t {
 #[cfg_attr(all(test, target_env = "msvc"), assert_instr(dup))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vget_high_f64(a: float64x2_t) -> float64x1_t {
-    float64x1_t(simd_extract!(a, 1))
+    float64x1_t([simd_extract!(a, 1)])
 }

 /// Duplicate vector element to vector or scalar
@@ -2058,7 +2058,7 @@ pub unsafe fn vget_high_p64(a: poly64x2_t) -> poly64x1_t {
 #[cfg_attr(test, assert_instr(nop))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vget_low_f64(a: float64x2_t) -> float64x1_t {
-    float64x1_t(simd_extract!(a, 0))
+    float64x1_t([simd_extract!(a, 0)])
 }

 /// Duplicate vector element to vector or scalar
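And a matching check for the lane-split pair above, again assuming an AArch64 target: `vget_low_f64` returns lane 0 and `vget_high_f64` lane 1 of a `float64x2_t` as one-lane vectors. The `vsetq_lane_f64` call is only there to build a distinguishable input for the sketch.

```rust
// Split a two-lane vector into its low and high halves.
#[cfg(target_arch = "aarch64")]
fn main() {
    use core::arch::aarch64::{
        vdupq_n_f64, vget_high_f64, vget_lane_f64, vget_low_f64, vsetq_lane_f64,
    };
    unsafe {
        // Build [1.0, 2.0]: broadcast 1.0, then overwrite lane 1 with 2.0.
        let v = vsetq_lane_f64::<1>(2.0, vdupq_n_f64(1.0));
        assert_eq!(vget_lane_f64::<0>(vget_low_f64(v)), 1.0);
        assert_eq!(vget_lane_f64::<0>(vget_high_f64(v)), 2.0);
    }
}

#[cfg(not(target_arch = "aarch64"))]
fn main() {}
```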