@@ -41,23 +41,13 @@ unsafe fn read_usize_unaligned(x: *const usize) -> usize {
     core::mem::transmute(x_read)
 }
 
-/// Loads a `T`-sized chunk from `src` into `dst` at offset `offset`, if that does not exceed
-/// `load_sz`. The offset pointers must both be `T`-aligned. Returns the new offset, advanced by the
-/// chunk size if a load happened.
-#[cfg(not(feature = "mem-unaligned"))]
 #[inline(always)]
-unsafe fn load_chunk_aligned<T: Copy>(
-    src: *const usize,
-    dst: *mut usize,
-    load_sz: usize,
-    offset: usize,
-) -> usize {
-    let chunk_sz = core::mem::size_of::<T>();
-    if (load_sz & chunk_sz) != 0 {
-        *dst.wrapping_byte_add(offset).cast::<T>() = *src.wrapping_byte_add(offset).cast::<T>();
-        offset | chunk_sz
-    } else {
-        offset
+unsafe fn copy_forward_bytes(mut dest: *mut u8, mut src: *const u8, n: usize) {
+    let dest_end = dest.wrapping_add(n);
+    while dest < dest_end {
+        *dest = *src;
+        dest = dest.wrapping_add(1);
+        src = src.wrapping_add(1);
     }
 }
 
@@ -72,13 +62,8 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
     // (since `load_sz < WORD_SIZE`).
     const { assert!(WORD_SIZE <= 8) };
 
-    let mut i = 0;
     let mut out = 0usize;
-    // We load in decreasing order, so the pointers remain sufficiently aligned for the next step.
-    i = load_chunk_aligned::<u32>(src, &raw mut out, load_sz, i);
-    i = load_chunk_aligned::<u16>(src, &raw mut out, load_sz, i);
-    i = load_chunk_aligned::<u8>(src, &raw mut out, load_sz, i);
-    debug_assert!(i == load_sz);
+    copy_forward_bytes(&raw mut out as *mut u8, src as *mut u8, load_sz);
     out
 }
 
@@ -94,31 +79,18 @@ unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
     // (since `load_sz < WORD_SIZE`).
     const { assert!(WORD_SIZE <= 8) };
 
-    let mut i = 0;
     let mut out = 0usize;
+
     // Obtain pointers pointing to the beginning of the range we want to load.
     let src_shifted = src.wrapping_byte_add(WORD_SIZE - load_sz);
     let out_shifted = (&raw mut out).wrapping_byte_add(WORD_SIZE - load_sz);
-    // We load in increasing order, so by the time we reach `u16` things are 2-aligned etc.
-    i = load_chunk_aligned::<u8>(src_shifted, out_shifted, load_sz, i);
-    i = load_chunk_aligned::<u16>(src_shifted, out_shifted, load_sz, i);
-    i = load_chunk_aligned::<u32>(src_shifted, out_shifted, load_sz, i);
-    debug_assert!(i == load_sz);
+
+    copy_forward_bytes(out_shifted as *mut u8, src_shifted as *mut u8, load_sz);
     out
 }
 
 #[inline(always)]
 pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize) {
-    #[inline(always)]
-    unsafe fn copy_forward_bytes(mut dest: *mut u8, mut src: *const u8, n: usize) {
-        let dest_end = dest.wrapping_add(n);
-        while dest < dest_end {
-            *dest = *src;
-            dest = dest.wrapping_add(1);
-            src = src.wrapping_add(1);
-        }
-    }
-
     #[inline(always)]
     unsafe fn copy_forward_aligned_words(dest: *mut u8, src: *const u8, n: usize) {
         let mut dest_usize = dest as *mut usize;
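For illustration, a minimal standalone sketch of the two partial-load helpers as they read after this change. The helper bodies mirror the diff above; the `main` function, the test values, and the pointer casts via `&mut` (in place of `&raw mut`) are added here for demonstration and are not part of the commit.

const WORD_SIZE: usize = core::mem::size_of::<usize>();

/// Byte-wise forward copy, now hoisted out of `copy_forward`.
unsafe fn copy_forward_bytes(mut dest: *mut u8, mut src: *const u8, n: usize) {
    let dest_end = dest.wrapping_add(n);
    while dest < dest_end {
        *dest = *src;
        dest = dest.wrapping_add(1);
        src = src.wrapping_add(1);
    }
}

/// Loads the first `load_sz` bytes of the word at `src`; the remaining
/// bytes of the result stay zero.
unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
    debug_assert!(load_sz < WORD_SIZE);
    let mut out = 0usize;
    copy_forward_bytes(&mut out as *mut usize as *mut u8, src as *const u8, load_sz);
    out
}

/// Loads the last `load_sz` bytes of the word at `src` into the last
/// `load_sz` bytes of the result; the leading bytes stay zero.
unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
    debug_assert!(load_sz < WORD_SIZE);
    let mut out = 0usize;
    // Point at the beginning of the range we want to load.
    let src_shifted = (src as *const u8).wrapping_add(WORD_SIZE - load_sz);
    let out_shifted = (&mut out as *mut usize as *mut u8).wrapping_add(WORD_SIZE - load_sz);
    copy_forward_bytes(out_shifted, src_shifted, load_sz);
    out
}

fn main() {
    // A word whose bytes are 0x11, 0x22, ... in memory order, portable
    // across 32- and 64-bit targets.
    let word = usize::from_ne_bytes(core::array::from_fn(|i| (i as u8 + 1) * 0x11));
    let bytes = word.to_ne_bytes();

    // The first three bytes land at the start of the result; the tail stays zero.
    let head = unsafe { load_aligned_partial(&word, 3) }.to_ne_bytes();
    assert_eq!(&head[..3], &bytes[..3]);
    assert!(head[3..].iter().all(|&b| b == 0));

    // The last three bytes land at the end of the result; the head stays zero.
    let tail = unsafe { load_aligned_end_partial(&word, 3) }.to_ne_bytes();
    assert_eq!(&tail[WORD_SIZE - 3..], &bytes[WORD_SIZE - 3..]);
    assert!(tail[..WORD_SIZE - 3].iter().all(|&b| b == 0));
}

One reading of the change, not stated in the diff itself: byte-wise accesses are valid at any alignment, so the per-chunk alignment bookkeeping and `debug_assert!` tracking that `load_chunk_aligned` needed can be dropped entirely, and a single shared `copy_forward_bytes` serves all three call sites.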