 // Setting up structs that can be used as const vectors
 #[repr(simd)]
 #[derive(Clone)]
-pub struct i8x2(i8, i8);
+pub struct i8x2([i8; 2]);

 #[repr(simd)]
 #[derive(Clone)]
-pub struct i8x2_arr([i8; 2]);
-
-#[repr(simd)]
-#[derive(Clone)]
-pub struct f32x2(f32, f32);
-
-#[repr(simd)]
-#[derive(Clone)]
-pub struct f32x2_arr([f32; 2]);
+pub struct f32x2([f32; 2]);

 #[repr(simd, packed)]
 #[derive(Copy, Clone)]
@@ -35,42 +27,34 @@ pub struct Simd<T, const N: usize>([T; N]);
 // that they are called with a const vector

 extern "unadjusted" {
-    #[no_mangle]
     fn test_i8x2(a: i8x2);
 }

 extern "unadjusted" {
-    #[no_mangle]
     fn test_i8x2_two_args(a: i8x2, b: i8x2);
 }

 extern "unadjusted" {
-    #[no_mangle]
     fn test_i8x2_mixed_args(a: i8x2, c: i32, b: i8x2);
 }

 extern "unadjusted" {
-    #[no_mangle]
-    fn test_i8x2_arr(a: i8x2_arr);
+    fn test_i8x2_arr(a: i8x2);
 }

 extern "unadjusted" {
-    #[no_mangle]
     fn test_f32x2(a: f32x2);
 }

 extern "unadjusted" {
-    #[no_mangle]
-    fn test_f32x2_arr(a: f32x2_arr);
+    fn test_f32x2_arr(a: f32x2);
 }

 extern "unadjusted" {
-    #[no_mangle]
     fn test_simd(a: Simd<i32, 4>);
 }

 extern "unadjusted" {
-    #[no_mangle]
     fn test_simd_unaligned(a: Simd<i32, 3>);
 }

@@ -81,22 +65,22 @@ extern "unadjusted" {
 pub fn do_call() {
     unsafe {
         // CHECK: call void @test_i8x2(<2 x i8> <i8 32, i8 64>
-        test_i8x2(const { i8x2(32, 64) });
+        test_i8x2(const { i8x2([32, 64]) });

         // CHECK: call void @test_i8x2_two_args(<2 x i8> <i8 32, i8 64>, <2 x i8> <i8 8, i8 16>
-        test_i8x2_two_args(const { i8x2(32, 64) }, const { i8x2(8, 16) });
+        test_i8x2_two_args(const { i8x2([32, 64]) }, const { i8x2([8, 16]) });

         // CHECK: call void @test_i8x2_mixed_args(<2 x i8> <i8 32, i8 64>, i32 43, <2 x i8> <i8 8, i8 16>
-        test_i8x2_mixed_args(const { i8x2(32, 64) }, 43, const { i8x2(8, 16) });
+        test_i8x2_mixed_args(const { i8x2([32, 64]) }, 43, const { i8x2([8, 16]) });

         // CHECK: call void @test_i8x2_arr(<2 x i8> <i8 32, i8 64>
-        test_i8x2_arr(const { i8x2_arr([32, 64]) });
+        test_i8x2_arr(const { i8x2([32, 64]) });

         // CHECK: call void @test_f32x2(<2 x float> <float 0x3FD47AE140000000, float 0x3FE47AE140000000>
-        test_f32x2(const { f32x2(0.32, 0.64) });
+        test_f32x2(const { f32x2([0.32, 0.64]) });

         // CHECK: void @test_f32x2_arr(<2 x float> <float 0x3FD47AE140000000, float 0x3FE47AE140000000>
-        test_f32x2_arr(const { f32x2_arr([0.32, 0.64]) });
+        test_f32x2_arr(const { f32x2([0.32, 0.64]) });

         // CHECK: call void @test_simd(<4 x i32> <i32 2, i32 4, i32 6, i32 8>
         test_simd(const { Simd::<i32, 4>([2, 4, 6, 8]) });