@@ -51,9 +51,16 @@ extern "C" {
 #[cfg(test)]
 use stdarch_test::assert_instr;

+// Rust compilers without 8a57820bca64a252489790a57cb5ea23db6f9198 need crypto (hence the bootstrap check)
+// LLVM builds without b8baa2a9132498ea286dbb0d03f005760ecc6fdb need crypto for arm (hence the target_arch check)
+
 /// AES single round encryption.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "aes")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aese))]
 pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
@@ -62,7 +69,11 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {

 /// AES single round decryption.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "aes")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesd))]
 pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
@@ -71,7 +82,11 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {

 /// AES mix columns.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "aes")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesmc))]
 pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
@@ -80,7 +95,11 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {

 /// AES inverse mix columns.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "aes")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesimc))]
 pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
@@ -89,7 +108,11 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {

 /// SHA1 fixed rotate.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1h))]
 pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
@@ -98,7 +121,11 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {

 /// SHA1 hash update accelerator, choose.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1c))]
 pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
@@ -107,7 +134,11 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->

 /// SHA1 hash update accelerator, majority.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1m))]
 pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
@@ -116,7 +147,11 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->

 /// SHA1 hash update accelerator, parity.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1p))]
 pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
@@ -125,7 +160,11 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->

 /// SHA1 schedule update accelerator, first part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1su0))]
 pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
@@ -134,7 +173,11 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_

 /// SHA1 schedule update accelerator, second part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1su1))]
 pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
@@ -143,7 +186,11 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t

 /// SHA256 hash update accelerator.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256h))]
 pub unsafe fn vsha256hq_u32(
@@ -156,7 +203,11 @@ pub unsafe fn vsha256hq_u32(

 /// SHA256 hash update accelerator, upper part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256h2))]
 pub unsafe fn vsha256h2q_u32(
@@ -169,7 +220,11 @@ pub unsafe fn vsha256h2q_u32(

 /// SHA256 schedule update accelerator, first part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256su0))]
 pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
@@ -178,7 +233,11 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t

 /// SHA256 schedule update accelerator, second part.
 #[inline]
-#[target_feature(enable = "crypto")]
+#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
+#[cfg_attr(
+    not(any(bootstrap, target_arch = "arm")),
+    target_feature(enable = "sha2")
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256su1))]
 pub unsafe fn vsha256su1q_u32(
@@ -196,7 +255,11 @@ mod tests {
     use std::mem;
     use stdarch_test::simd_test;

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(
+        all(not(bootstrap), target_arch = "aarch64"),
+        simd_test(enable = "aes")
+    )]
     unsafe fn test_vaeseq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
@@ -209,7 +272,11 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(
+        all(not(bootstrap), target_arch = "aarch64"),
+        simd_test(enable = "aes")
+    )]
     unsafe fn test_vaesdq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
@@ -220,7 +287,11 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(
+        all(not(bootstrap), target_arch = "aarch64"),
+        simd_test(enable = "aes")
+    )]
     unsafe fn test_vaesmcq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let r: u8x16 = mem::transmute(vaesmcq_u8(data));
@@ -230,7 +301,11 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(
+        all(not(bootstrap), target_arch = "aarch64"),
+        simd_test(enable = "aes")
+    )]
     unsafe fn test_vaesimcq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let r: u8x16 = mem::transmute(vaesimcq_u8(data));
@@ -240,13 +315,15 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1h_u32() {
         assert_eq!(vsha1h_u32(0x1234), 0x048d);
         assert_eq!(vsha1h_u32(0x5678), 0x159e);
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1su0q_u32() {
         let r: u32x4 = mem::transmute(vsha1su0q_u32(
             mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
@@ -256,7 +333,8 @@ mod tests {
         assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678));
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1su1q_u32() {
         let r: u32x4 = mem::transmute(vsha1su1q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -268,7 +346,8 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1cq_u32() {
         let r: u32x4 = mem::transmute(vsha1cq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -281,7 +360,8 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1pq_u32() {
         let r: u32x4 = mem::transmute(vsha1pq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -294,7 +374,8 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha1mq_u32() {
         let r: u32x4 = mem::transmute(vsha1mq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -307,7 +388,8 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256hq_u32() {
         let r: u32x4 = mem::transmute(vsha256hq_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -320,7 +402,8 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256h2q_u32() {
         let r: u32x4 = mem::transmute(vsha256h2q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -333,7 +416,8 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256su0q_u32() {
         let r: u32x4 = mem::transmute(vsha256su0q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -345,7 +429,8 @@ mod tests {
         );
     }

-    #[simd_test(enable = "crypto")]
+    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
+    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256su1q_u32() {
         let r: u32x4 = mem::transmute(vsha256su1q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
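Note on usage: with this change, a non-bootstrap compiler targeting AArch64 gates the AES intrinsics on the fine-grained "aes" target feature and the SHA-1/SHA-256 intrinsics on "sha2", while arm targets and bootstrap builds keep the umbrella "crypto" feature. Below is a minimal caller-side sketch of pairing one of these intrinsics with runtime feature detection; the intrinsic name and the "aes" feature come from this diff, but the wrapper function, its name, and the byte-array signature are hypothetical.

// Hypothetical wrapper (not part of this diff): run one AES round only when
// the CPU reports support for the `aes` feature at runtime.
#[cfg(target_arch = "aarch64")]
fn aes_single_round(data: [u8; 16], key: [u8; 16]) -> Option<[u8; 16]> {
    use core::arch::aarch64::{uint8x16_t, vaeseq_u8};
    if std::arch::is_aarch64_feature_detected!("aes") {
        unsafe {
            // Reinterpret the byte arrays as the NEON vector type the
            // intrinsic expects, run the round, and convert back.
            let d: uint8x16_t = core::mem::transmute(data);
            let k: uint8x16_t = core::mem::transmute(key);
            Some(core::mem::transmute(vaeseq_u8(d, k)))
        }
    } else {
        None
    }
}

The SHA intrinsics follow the same shape, with is_aarch64_feature_detected!("sha2") as the runtime check.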