@@ -17,7 +17,7 @@ use std::fmt;
 use mir;
 use hir::def_id::DefId;
 use ty::{self, TyCtxt};
-use ty::layout::{self, Align, HasDataLayout};
+use ty::layout::{self, Align, HasDataLayout, Size};
 use middle::region;
 use std::iter;
 use std::io;
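
For readers following the diff, here is a minimal sketch of the `Size` newtype it switches to. The real definition lives in `ty::layout` and carries more methods; only the shape and the operations this patch exercises are shown, and the `Sub` impl is an assumption inferred from the `end - len` expression in `set_range` below:

```rust
// Minimal sketch (assumed shape) of ty::layout::Size as used in this diff.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Size {
    raw: u64, // a size or offset, in bytes
}

impl Size {
    // Wrap a raw byte count.
    pub fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    }

    // Unwrap back to a raw byte count for low-level arithmetic.
    pub fn bytes(self) -> u64 {
        self.raw
    }
}

// Assumed: `set_range` computes `end - len` directly on Size values.
impl std::ops::Sub for Size {
    type Output = Size;
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.raw - other.raw)
    }
}
```

Deriving `Ord` matters here: `relocations` becomes a `BTreeMap<Size, AllocId>` below, and `UndefMask` compares `end > self.len` directly on `Size` values.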
@@ -109,42 +109,42 @@ impl<T: layout::HasDataLayout> PointerArithmetic for T {}
 #[derive(Copy, Clone, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Hash)]
 pub struct MemoryPointer {
     pub alloc_id: AllocId,
-    pub offset: u64,
+    pub offset: Size,
 }
 
 impl<'tcx> MemoryPointer {
-    pub fn new(alloc_id: AllocId, offset: u64) -> Self {
+    pub fn new(alloc_id: AllocId, offset: Size) -> Self {
         MemoryPointer { alloc_id, offset }
     }
 
     pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
         MemoryPointer::new(
             self.alloc_id,
-            cx.data_layout().wrapping_signed_offset(self.offset, i),
+            Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
         )
     }
 
     pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
-        let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
-        (MemoryPointer::new(self.alloc_id, res), over)
+        let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
+        (MemoryPointer::new(self.alloc_id, Size::from_bytes(res)), over)
     }
 
     pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
         Ok(MemoryPointer::new(
             self.alloc_id,
-            cx.data_layout().signed_offset(self.offset, i)?,
+            Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
         ))
     }
 
-    pub fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
-        let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
-        (MemoryPointer::new(self.alloc_id, res), over)
+    pub fn overflowing_offset<C: HasDataLayout>(self, i: Size, cx: C) -> (Self, bool) {
+        let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
+        (MemoryPointer::new(self.alloc_id, Size::from_bytes(res)), over)
     }
 
-    pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+    pub fn offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
         Ok(MemoryPointer::new(
             self.alloc_id,
-            cx.data_layout().offset(self.offset, i)?,
+            Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
         ))
     }
 }
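
The recurring pattern in these methods: the `DataLayout` helpers still operate on raw `u64` byte counts, so `Size` is unwrapped with `.bytes()` at the call and the result is re-wrapped with `Size::from_bytes`. A standalone illustration of that boundary, using a hypothetical `checked_add`-based helper (with the `Size` sketch above in scope):

```rust
// Illustration only: unwrap Size -> raw u64 arithmetic -> re-wrap.
// This mirrors how the diff threads Size through the DataLayout helpers.
fn offset_checked(offset: Size, delta: Size) -> Option<Size> {
    offset.bytes()                   // Size -> u64
        .checked_add(delta.bytes())  // raw, overflow-aware arithmetic
        .map(Size::from_bytes)       // u64 -> Size
}
```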
@@ -244,7 +244,7 @@ pub struct Allocation {
     pub bytes: Vec<u8>,
     /// Maps from byte addresses to allocations.
     /// Only the first byte of a pointer is inserted into the map.
-    pub relocations: BTreeMap<u64, AllocId>,
+    pub relocations: BTreeMap<Size, AllocId>,
     /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
     pub undef_mask: UndefMask,
     /// The alignment of the allocation to detect unaligned reads.
@@ -257,8 +257,8 @@ pub struct Allocation {
 
 impl Allocation {
     pub fn from_bytes(slice: &[u8], align: Align) -> Self {
-        let mut undef_mask = UndefMask::new(0);
-        undef_mask.grow(slice.len() as u64, true);
+        let mut undef_mask = UndefMask::new(Size::from_bytes(0));
+        undef_mask.grow(Size::from_bytes(slice.len() as u64), true);
         Self {
             bytes: slice.to_owned(),
             relocations: BTreeMap::new(),
@@ -272,10 +272,10 @@ impl Allocation {
         Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap())
     }
 
-    pub fn undef(size: u64, align: Align) -> Self {
-        assert_eq!(size as usize as u64, size);
+    pub fn undef(size: Size, align: Align) -> Self {
+        assert_eq!(size.bytes() as usize as u64, size.bytes());
         Allocation {
-            bytes: vec![0; size as usize],
+            bytes: vec![0; size.bytes() as usize],
             relocations: BTreeMap::new(),
             undef_mask: UndefMask::new(size),
             align,
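
The `assert_eq!(size.bytes() as usize as u64, size.bytes())` idiom guards the 64-bit byte count before it is used as a `Vec` length: the round-trip through `usize` only preserves the value when it fits the host's address width. Spelled out as a standalone check:

```rust
// True iff a 64-bit byte count survives the round-trip through usize,
// i.e. it can safely be used as a Vec length on this host.
fn fits_in_usize(n: u64) -> bool {
    n as usize as u64 == n
}
```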
@@ -331,54 +331,54 @@ const BLOCK_SIZE: u64 = 64;
 #[derive(Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)]
 pub struct UndefMask {
     blocks: Vec<Block>,
-    len: u64,
+    len: Size,
 }
 
 impl_stable_hash_for!(struct mir::interpret::UndefMask { blocks, len });
 
 impl UndefMask {
-    pub fn new(size: u64) -> Self {
+    pub fn new(size: Size) -> Self {
         let mut m = UndefMask {
             blocks: vec![],
-            len: 0,
+            len: Size::from_bytes(0),
         };
         m.grow(size, false);
         m
     }
 
     /// Check whether the range `start..end` (end-exclusive) is entirely defined.
-    pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
+    pub fn is_range_defined(&self, start: Size, end: Size) -> bool {
         if end > self.len {
             return false;
         }
-        for i in start..end {
-            if !self.get(i) {
+        for i in start.bytes()..end.bytes() {
+            if !self.get(Size::from_bytes(i)) {
                 return false;
             }
         }
         true
     }
 
-    pub fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
+    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
         let len = self.len;
         if end > len {
             self.grow(end - len, new_state);
         }
         self.set_range_inbounds(start, end, new_state);
     }
 
-    pub fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
-        for i in start..end {
-            self.set(i, new_state);
+    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
+        for i in start.bytes()..end.bytes() {
+            self.set(Size::from_bytes(i), new_state);
         }
     }
 
-    pub fn get(&self, i: u64) -> bool {
+    pub fn get(&self, i: Size) -> bool {
         let (block, bit) = bit_index(i);
         (self.blocks[block] & 1 << bit) != 0
     }
 
-    pub fn set(&mut self, i: u64, new_state: bool) {
+    pub fn set(&mut self, i: Size, new_state: bool) {
         let (block, bit) = bit_index(i);
         if new_state {
             self.blocks[block] |= 1 << bit;
@@ -387,10 +387,10 @@ impl UndefMask {
         }
     }
 
-    pub fn grow(&mut self, amount: u64, new_state: bool) {
-        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
-        if amount > unused_trailing_bits {
-            let additional_blocks = amount / BLOCK_SIZE + 1;
+    pub fn grow(&mut self, amount: Size, new_state: bool) {
+        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes();
+        if amount.bytes() > unused_trailing_bits {
+            let additional_blocks = amount.bytes() / BLOCK_SIZE + 1;
             assert_eq!(additional_blocks as usize as u64, additional_blocks);
             self.blocks.extend(
                 iter::repeat(0).take(additional_blocks as usize),
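
One subtlety in `grow`: `UndefMask` stores one definedness bit per allocation byte, so a `Size` measured in bytes doubles as a bit count inside the mask. The spare-capacity arithmetic, extracted into a runnable form (hypothetical free function, assuming `BLOCK_SIZE = 64` as above):

```rust
const BLOCK_SIZE: u64 = 64;

// Bits still unused at the tail of the last allocated block.
// With 2 blocks (128 bits) and 100 bits in use, 28 bits remain.
fn unused_trailing_bits(num_blocks: usize, len_in_bits: u64) -> u64 {
    num_blocks as u64 * BLOCK_SIZE - len_in_bits
}

fn main() {
    assert_eq!(unused_trailing_bits(2, 100), 28);
    assert_eq!(unused_trailing_bits(2, 128), 0); // full: next grow allocates
}
```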
@@ -402,7 +402,8 @@ impl UndefMask {
     }
 }
 
-fn bit_index(bits: u64) -> (usize, usize) {
+fn bit_index(bits: Size) -> (usize, usize) {
+    let bits = bits.bytes();
     let a = bits / BLOCK_SIZE;
     let b = bits % BLOCK_SIZE;
     assert_eq!(a as usize as u64, a);
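
`bit_index` splits a flat bit index into (block, bit-within-block) coordinates. A worked example with `BLOCK_SIZE = 64` (standalone sketch, taking a raw `u64` for brevity):

```rust
const BLOCK_SIZE: u64 = 64;

// Split a flat bit index into (block, bit) coordinates, as bit_index does.
fn bit_index(bits: u64) -> (usize, usize) {
    ((bits / BLOCK_SIZE) as usize, (bits % BLOCK_SIZE) as usize)
}

fn main() {
    assert_eq!(bit_index(63), (0, 63)); // last bit of block 0
    assert_eq!(bit_index(64), (1, 0));  // first bit of block 1
    assert_eq!(bit_index(70), (1, 6));  // 70 = 1 * 64 + 6
}
```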