@@ -76,90 +76,205 @@ intrinsics! {
         );
     }

-    // FIXME: The `*4` and `*8` variants should be defined as aliases.
+    // FIXME(arm): The `*4` and `*8` variants should be defined as aliases.

+    /// `memcpy` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memcpy` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memcpy(dest: *mut u8, src: *const u8, n: usize) {
-        crate::mem::memcpy(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memcpy(dst: *mut u8, src: *const u8, n: usize) {
+        // SAFETY: memcpy preconditions apply.
+        unsafe { crate::mem::memcpy(dst, src, n) };
     }

+    /// `memcpy` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memcpy` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// four bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memcpy4(dest: *mut u8, src: *const u8, n: usize) {
+    pub unsafe extern "aapcs" fn __aeabi_memcpy4(dst: *mut u8, src: *const u8, n: usize) {
         // We are guaranteed 4-alignment, so accessing at u32 is okay.
-        let mut dest = dest as *mut u32;
-        let mut src = src as *mut u32;
+        let mut dst = dst.cast::<u32>();
+        let mut src = src.cast::<u32>();
+        debug_assert!(dst.is_aligned());
+        debug_assert!(src.is_aligned());
         let mut n = n;

         while n >= 4 {
-            *dest = *src;
-            dest = dest.offset(1);
-            src = src.offset(1);
+            // SAFETY: `dst` and `src` are both valid for at least 4 bytes, from
+            // `memcpy` preconditions and the loop guard.
+            unsafe { *dst = *src };
+
+            // FIXME(addr): if we can make this end-of-address-space safe without losing
+            // performance, we may want to consider that.
+            // SAFETY: memcpy is not expected to work at the end of the address space
+            unsafe {
+                dst = dst.offset(1);
+                src = src.offset(1);
+            }
+
             n -= 4;
         }

-        __aeabi_memcpy(dest as *mut u8, src as *const u8, n);
+        // SAFETY: `dst` and `src` will still be valid for `n` bytes
+        unsafe { __aeabi_memcpy(dst.cast::<u8>(), src.cast::<u8>(), n) };
     }

+    /// `memcpy` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memcpy` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// eight bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memcpy8(dest: *mut u8, src: *const u8, n: usize) {
-        __aeabi_memcpy4(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memcpy8(dst: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(dst.addr() & 7 == 0);
+        debug_assert!(src.addr() & 7 == 0);
+
+        // SAFETY: memcpy preconditions apply, less strict alignment.
+        unsafe { __aeabi_memcpy4(dst, src, n) };
     }

+    /// `memmove` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memmove` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memmove(dest: *mut u8, src: *const u8, n: usize) {
-        crate::mem::memmove(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memmove(dst: *mut u8, src: *const u8, n: usize) {
+        // SAFETY: memmove preconditions apply.
+        unsafe { crate::mem::memmove(dst, src, n) };
     }

+    /// `memmove` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memmove` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// four bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memmove4(dest: *mut u8, src: *const u8, n: usize) {
-        __aeabi_memmove(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memmove4(dst: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(dst.addr() & 3 == 0);
+        debug_assert!(src.addr() & 3 == 0);
+
+        // SAFETY: same preconditions, less strict alignment.
+        unsafe { __aeabi_memmove(dst, src, n) };
     }

+    /// `memmove` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memmove` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// eight bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memmove8(dest: *mut u8, src: *const u8, n: usize) {
-        __aeabi_memmove(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memmove8(dst: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(dst.addr() & 7 == 0);
+        debug_assert!(src.addr() & 7 == 0);
+
+        // SAFETY: memmove preconditions apply, less strict alignment.
+        unsafe { __aeabi_memmove(dst, src, n) };
     }

+    /// `memset` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memset` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memset(dest: *mut u8, n: usize, c: i32) {
+    pub unsafe extern "aapcs" fn __aeabi_memset(dst: *mut u8, n: usize, c: i32) {
         // Note the different argument order
-        crate::mem::memset(dest, c, n);
+        // SAFETY: memset preconditions apply.
+        unsafe { crate::mem::memset(dst, c, n) };
     }

+    /// `memset` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memset` requirements apply. Additionally, `dst` must be aligned to
+    /// four bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memset4(dest: *mut u8, n: usize, c: i32) {
-        let mut dest = dest as *mut u32;
+    pub unsafe extern "aapcs" fn __aeabi_memset4(dst: *mut u8, n: usize, c: i32) {
+        let mut dst = dst.cast::<u32>();
+        debug_assert!(dst.is_aligned());
         let mut n = n;

         let byte = (c as u32) & 0xff;
         let c = (byte << 24) | (byte << 16) | (byte << 8) | byte;

         while n >= 4 {
-            *dest = c;
-            dest = dest.offset(1);
+            // SAFETY: `dst` is valid for at least 4 bytes, from `memset` preconditions and
+            // the loop guard.
+            unsafe { *dst = c };
+
+            // FIXME(addr): if we can make this end-of-address-space safe without losing
+            // performance, we may want to consider that.
+            // SAFETY: memset is not expected to work at the end of the address space
+            unsafe {
+                dst = dst.offset(1);
+            }
             n -= 4;
         }

-        __aeabi_memset(dest as *mut u8, n, byte as i32);
+        // SAFETY: `dst` will still be valid for `n` bytes
+        unsafe { __aeabi_memset(dst.cast::<u8>(), n, byte as i32) };
     }

+    /// `memset` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memset` requirements apply. Additionally, `dst` must be aligned to
+    /// eight bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memset8(dest: *mut u8, n: usize, c: i32) {
-        __aeabi_memset4(dest, n, c);
+    pub unsafe extern "aapcs" fn __aeabi_memset8(dst: *mut u8, n: usize, c: i32) {
+        debug_assert!(dst.addr() & 7 == 0);
+
+        // SAFETY: memset preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset4(dst, n, c) };
     }

+    /// `memclr` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memclr` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memclr(dest: *mut u8, n: usize) {
-        __aeabi_memset(dest, n, 0);
+    pub unsafe extern "aapcs" fn __aeabi_memclr(dst: *mut u8, n: usize) {
+        // SAFETY: memclr preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset(dst, n, 0) };
     }

+    /// `memclr` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memclr` requirements apply. Additionally, `dst` must be aligned to
+    /// four bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memclr4(dest: *mut u8, n: usize) {
-        __aeabi_memset4(dest, n, 0);
+    pub unsafe extern "aapcs" fn __aeabi_memclr4(dst: *mut u8, n: usize) {
+        debug_assert!(dst.addr() & 3 == 0);
+
+        // SAFETY: memclr preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset4(dst, n, 0) };
     }

+    /// `memclr` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memclr` requirements apply. Additionally, `dst` must be aligned to
+    /// eight bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memclr8(dest: *mut u8, n: usize) {
-        __aeabi_memset4(dest, n, 0);
+    pub unsafe extern "aapcs" fn __aeabi_memclr8(dst: *mut u8, n: usize) {
+        debug_assert!(dst.addr() & 7 == 0);
+
+        // SAFETY: memclr preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset4(dst, n, 0) };
     }
 }
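
For reference, here is a minimal standalone sketch (not part of the patch) of the loop-plus-tail structure that `__aeabi_memcpy4` implements: copy one aligned 32-bit word while at least four bytes remain, then fall back to a byte-wise copy for the tail. It is written over safe slices so it runs on any target; `copy_aligned4` is a hypothetical name used only for illustration, and the alignment `debug_assert!`s mirror the ones added in the diff.

```rust
// Illustrative sketch of the word-at-a-time pattern used by the `*4` variants.
fn copy_aligned4(dst: &mut [u8], src: &[u8]) {
    assert_eq!(dst.len(), src.len());
    // Same checks as the patch performs after casting to a `u32` pointer.
    debug_assert!(dst.as_ptr().cast::<u32>().is_aligned());
    debug_assert!(src.as_ptr().cast::<u32>().is_aligned());

    let mut n = dst.len();
    let mut i = 0;

    // `while n >= 4`: move one aligned 32-bit word per iteration.
    while n >= 4 {
        let word = u32::from_ne_bytes(src[i..i + 4].try_into().unwrap());
        dst[i..i + 4].copy_from_slice(&word.to_ne_bytes());
        i += 4;
        n -= 4;
    }

    // Tail: the remaining bytes (fewer than four) are copied byte-wise, which is
    // what the final `__aeabi_memcpy(dst, src, n)` call does in the patch.
    dst[i..].copy_from_slice(&src[i..]);
}
```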