@@ -53,7 +53,8 @@ use stdarch_test::assert_instr;
 
 /// AES single round encryption.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "aes"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aese))]
 pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
@@ -62,7 +63,8 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
 
 /// AES single round decryption.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "aes"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesd))]
 pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
@@ -71,7 +73,8 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
 
 /// AES mix columns.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "aes"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesmc))]
 pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
@@ -80,7 +83,8 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
 
 /// AES inverse mix columns.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "aes"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesimc))]
 pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
@@ -89,7 +93,8 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
 
 /// SHA1 fixed rotate.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1h))]
 pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
@@ -98,7 +103,8 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
 
 /// SHA1 hash update accelerator, choose.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1c))]
 pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
@@ -107,7 +113,8 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 
 /// SHA1 hash update accelerator, majority.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1m))]
 pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
@@ -116,7 +123,8 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 
 /// SHA1 hash update accelerator, parity.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1p))]
 pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
@@ -125,7 +133,8 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 
 /// SHA1 schedule update accelerator, first part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1su0))]
 pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
@@ -134,7 +143,8 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_
 
 /// SHA1 schedule update accelerator, second part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1su1))]
 pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
@@ -143,7 +153,8 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t
 
 /// SHA256 hash update accelerator.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256h))]
 pub unsafe fn vsha256hq_u32(
@@ -156,7 +167,8 @@ pub unsafe fn vsha256hq_u32(
 
 /// SHA256 hash update accelerator, upper part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256h2))]
 pub unsafe fn vsha256h2q_u32(
@@ -169,7 +181,8 @@ pub unsafe fn vsha256h2q_u32(
 
 /// SHA256 schedule update accelerator, first part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256su0))]
 pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
@@ -178,7 +191,8 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t
 
 /// SHA256 schedule update accelerator, second part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(bootstrap, target_feature(enable = "crypto"))]
+#[cfg_attr(not(bootstrap), target_feature(enable = "sha2"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256su1))]
 pub unsafe fn vsha256su1q_u32(
@@ -209,7 +223,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "aes"))]
     unsafe fn test_vaesdq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
@@ -220,7 +235,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "aes"))]
     unsafe fn test_vaesmcq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let r: u8x16 = mem::transmute(vaesmcq_u8(data));
@@ -230,7 +246,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "aes"))]
     unsafe fn test_vaesimcq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let r: u8x16 = mem::transmute(vaesimcq_u8(data));
@@ -240,13 +257,15 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1h_u32() {
         assert_eq!(vsha1h_u32(0x1234), 0x048d);
         assert_eq!(vsha1h_u32(0x5678), 0x159e);
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1su0q_u32() {
         let r: u32x4 = mem::transmute(vsha1su0q_u32(
             mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
@@ -256,7 +275,8 @@ mod tests {
         assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678));
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1su1q_u32() {
         let r: u32x4 = mem::transmute(vsha1su1q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -268,7 +288,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1cq_u32() {
         let r: u32x4 = mem::transmute(vsha1cq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -281,7 +302,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1pq_u32() {
         let r: u32x4 = mem::transmute(vsha1pq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -294,7 +316,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1mq_u32() {
         let r: u32x4 = mem::transmute(vsha1mq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -307,7 +330,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256hq_u32() {
         let r: u32x4 = mem::transmute(vsha256hq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -320,7 +344,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256h2q_u32() {
         let r: u32x4 = mem::transmute(vsha256h2q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -333,7 +358,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256su0q_u32() {
         let r: u32x4 = mem::transmute(vsha256su0q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -345,7 +371,8 @@ mod tests {
         );
     }
 
-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(bootstrap, simd_test(enable = "crypto"))]
+    #[cfg_attr(not(bootstrap), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256su1q_u32() {
         let r: u32x4 = mem::transmute(vsha256su1q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
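
Usage note (not part of the patch): on non-bootstrap compilers these intrinsics are now gated on the fine-grained `aes` and `sha2` target features rather than the umbrella `crypto` feature, so downstream code should gate on the same names. A minimal sketch, assuming an AArch64 target: `aes_round` is a hypothetical helper name, while `vaeseq_u8`, `vaesmcq_u8`, and `is_aarch64_feature_detected!` are real `std::arch` items.

use std::arch::aarch64::{uint8x16_t, vaeseq_u8, vaesmcq_u8};

// Hypothetical helper: one full AES round (AESE then AESMC) on a
// 128-bit block, compiled under the new fine-grained feature name.
#[target_feature(enable = "aes")]
unsafe fn aes_round(block: uint8x16_t, round_key: uint8x16_t) -> uint8x16_t {
    vaesmcq_u8(vaeseq_u8(block, round_key))
}

fn main() {
    // Runtime dispatch: only take the feature-gated path when the CPU
    // reports AES support ("sha2" is detected the same way).
    if std::arch::is_aarch64_feature_detected!("aes") {
        // `unsafe { aes_round(...) }` is sound to call here.
    }
}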