
Commit 7c6befe

Merge pull request rust-lang#274 from RalfJung/packed2

make force_allocation handle packed ByValPair

2 parents: f906c54 + 4458001
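
For context: Miri's force_allocation spills register-like values (ByVal, ByValPair) into real memory so they become addressable. When the destination is a field of a #[repr(packed)] struct, that memory may be under-aligned, so the spilled value has to remember this and later accesses must skip alignment checks. Below is a minimal sketch of the shape of the representation this commit reworks; the types are simplified stand-ins (Miri's real ByRef carries a Pointer, not a usize):

// Sketch only: simplified stand-ins for Miri's value representation.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug)]
enum Value {
    // The value lives in memory. `aligned == false` records that `ptr`
    // may be under-aligned (e.g. it points into a packed struct), so
    // accesses through it must bypass the usual alignment checks.
    ByRef { ptr: usize, aligned: bool },
    // Small values kept directly in the interpreter, not in memory.
    ByVal(u64),
    ByValPair(u64, u64),
}

fn main() {
    // Hypothetical: spilling a pair into a packed location records
    // that the destination is not necessarily aligned.
    let spilled = Value::ByRef { ptr: 0x1001, aligned: false };
    println!("{:?}", spilled);
}

Turning ByRef into a struct variant (rather than a positional tuple) makes the aligned flag self-documenting at every use site, which is most of what the mechanical hunks below do.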

File tree: 9 files changed, +187 −139 lines


src/librustc_mir/interpret/eval_context.rs (75 additions, 45 deletions)

(Large diff not rendered by default.)

src/librustc_mir/interpret/lvalue.rs (7 additions, 7 deletions)

@@ -196,7 +196,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         match lvalue {
             Lvalue::Ptr { ptr, extra, aligned } => {
                 assert_eq!(extra, LvalueExtra::None);
-                Ok(Value::ByRef(ptr, aligned))
+                Ok(Value::ByRef { ptr, aligned })
             }
             Lvalue::Local { frame, local } => {
                 self.stack[frame].get_local(local)
@@ -305,7 +305,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0");
                     return Ok(base);
                 },
-                Value::ByRef(..) |
+                Value::ByRef{..} |
                 Value::ByValPair(..) |
                 Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
             },
@@ -315,7 +315,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0");
                     return Ok(base);
                 },
-                Value::ByRef(..) |
+                Value::ByRef{..} |
                 Value::ByValPair(..) |
                 Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
             },
@@ -349,17 +349,17 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
         Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
     }

-    pub(super) fn val_to_lvalue(&mut self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
+    pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
         Ok(match self.tcx.struct_tail(ty).sty {
             ty::TyDynamic(..) => {
-                let (ptr, vtable) = val.into_ptr_vtable_pair(&mut self.memory)?;
+                let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?;
                 Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: true }
             },
             ty::TyStr | ty::TySlice(_) => {
-                let (ptr, len) = val.into_slice(&mut self.memory)?;
+                let (ptr, len) = val.into_slice(&self.memory)?;
                 Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: true }
             },
-            _ => Lvalue::Ptr { ptr: val.into_ptr(&mut self.memory)?, extra: LvalueExtra::None, aligned: true },
+            _ => Lvalue::Ptr { ptr: val.into_ptr(&self.memory)?, extra: LvalueExtra::None, aligned: true },
         })
     }
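
The val_to_lvalue hunk above dispatches on the tail of the type: trait objects carry a vtable pointer as fat-pointer metadata, slices and str carry a length, and sized types are plain thin pointers. A self-contained sketch of that decomposition, using stand-in types rather than Miri's:

// Stand-in for LvalueExtra: the metadata half of a (possibly fat) pointer.
#[derive(Debug)]
enum Extra {
    None,          // sized type: thin pointer, no metadata
    Length(u64),   // [T] / str: element count
    Vtable(usize), // dyn Trait: address of the vtable
}

#[derive(Debug)]
struct Place {
    ptr: usize,
    extra: Extra,
}

fn main() {
    // Hypothetical addresses, for illustration only.
    let thin = Place { ptr: 0x1000, extra: Extra::None };
    let slice = Place { ptr: 0x2000, extra: Extra::Length(16) };
    let object = Place { ptr: 0x3000, extra: Extra::Vtable(0x4000) };
    println!("{:?}\n{:?}\n{:?}", thin, slice, object);
}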

src/librustc_mir/interpret/memory.rs (33 additions, 19 deletions)

@@ -1,6 +1,7 @@
 use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
 use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
 use std::{fmt, iter, ptr, mem, io, ops};
+use std::cell::Cell;

 use rustc::ty;
 use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout};
@@ -266,8 +267,8 @@ pub struct Memory<'a, 'tcx> {

     /// To avoid having to pass flags to every single memory access, we have some global state saying whether
     /// alignment checking is currently enforced for read and/or write accesses.
-    reads_are_aligned: bool,
-    writes_are_aligned: bool,
+    reads_are_aligned: Cell<bool>,
+    writes_are_aligned: Cell<bool>,

     /// The current stack frame. Used to check accesses against locks.
     cur_frame: usize,
@@ -287,8 +288,8 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
             literal_alloc_cache: HashMap::new(),
             thread_local: BTreeMap::new(),
             next_thread_local: 0,
-            reads_are_aligned: true,
-            writes_are_aligned: true,
+            reads_are_aligned: Cell::new(true),
+            writes_are_aligned: Cell::new(true),
             cur_frame: usize::max_value(),
         }
     }
@@ -796,7 +797,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
 impl<'a, 'tcx> Memory<'a, 'tcx> {
     fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
         // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
-        if self.reads_are_aligned {
+        if self.reads_are_aligned.get() {
             self.check_align(ptr.into(), align)?;
         }
         if size == 0 {
@@ -813,7 +814,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {

     fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
         // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
-        if self.writes_are_aligned {
+        if self.writes_are_aligned.get() {
             self.check_align(ptr.into(), align)?;
         }
         if size == 0 {
@@ -909,10 +910,10 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
     pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> {
         if size == 0 {
             // Empty accesses don't need to be valid pointers, but they should still be aligned
-            if self.reads_are_aligned {
+            if self.reads_are_aligned.get() {
                 self.check_align(src, align)?;
             }
-            if self.writes_are_aligned {
+            if self.writes_are_aligned.get() {
                 self.check_align(dest, align)?;
             }
             return Ok(());
@@ -968,7 +969,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
     pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
         if size == 0 {
             // Empty accesses don't need to be valid pointers, but they should still be non-NULL
-            if self.reads_are_aligned {
+            if self.reads_are_aligned.get() {
                 self.check_align(ptr, 1)?;
             }
             return Ok(&[]);
@@ -979,7 +980,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
     pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
         if src.is_empty() {
             // Empty accesses don't need to be valid pointers, but they should still be non-NULL
-            if self.writes_are_aligned {
+            if self.writes_are_aligned.get() {
                 self.check_align(ptr, 1)?;
             }
             return Ok(());
@@ -992,7 +993,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
     pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
         if count == 0 {
             // Empty accesses don't need to be valid pointers, but they should still be non-NULL
-            if self.writes_are_aligned {
+            if self.writes_are_aligned.get() {
                 self.check_align(ptr, 1)?;
             }
             return Ok(());
@@ -1399,23 +1400,36 @@ pub(crate) trait HasMemory<'a, 'tcx> {
     fn memory(&self) -> &Memory<'a, 'tcx>;

     // These are not supposed to be overriden.
-    fn read_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+    fn read_maybe_aligned<F, T>(&self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+        where F: FnOnce(&Self) -> EvalResult<'tcx, T>
+    {
+        let old = self.memory().reads_are_aligned.get();
+        // Do alignment checking if *all* nested calls say it has to be aligned.
+        self.memory().reads_are_aligned.set(old && aligned);
+        let t = f(self);
+        self.memory().reads_are_aligned.set(old);
+        t
+    }
+
+    fn read_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
         where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
     {
-        assert!(self.memory_mut().reads_are_aligned, "Unaligned reads must not be nested");
-        self.memory_mut().reads_are_aligned = aligned;
+        let old = self.memory().reads_are_aligned.get();
+        // Do alignment checking if *all* nested calls say it has to be aligned.
+        self.memory().reads_are_aligned.set(old && aligned);
         let t = f(self);
-        self.memory_mut().reads_are_aligned = true;
+        self.memory().reads_are_aligned.set(old);
         t
     }

-    fn write_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+    fn write_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
         where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
     {
-        assert!(self.memory_mut().writes_are_aligned, "Unaligned writes must not be nested");
-        self.memory_mut().writes_are_aligned = aligned;
+        let old = self.memory().writes_are_aligned.get();
+        // Do alignment checking if *all* nested calls say it has to be aligned.
+        self.memory().writes_are_aligned.set(old && aligned);
         let t = f(self);
-        self.memory_mut().writes_are_aligned = true;
+        self.memory().writes_are_aligned.set(old);
         t
     }
 }
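
Two ideas drive the memory.rs hunks. First, the alignment flags move into Cell<bool>, so read-only (&self) entry points such as the new read_maybe_aligned can still toggle them; this is what lets the into_ptr/into_slice callers elsewhere in this commit take &self.memory instead of &mut self.memory. Second, the old "must not be nested" assertion is replaced by save-and-restore with old && aligned, which makes the calls nestable: a check is enforced only if every enclosing caller asked for it. A runnable sketch of just that pattern, with simplified signatures (the real methods return EvalResult):

use std::cell::Cell;

struct Memory {
    reads_are_aligned: Cell<bool>,
}

impl Memory {
    fn read_maybe_aligned<T>(&self, aligned: bool, f: impl FnOnce(&Self) -> T) -> T {
        let old = self.reads_are_aligned.get();
        // Enforce alignment only if *all* nested calls require it.
        self.reads_are_aligned.set(old && aligned);
        let t = f(self);
        self.reads_are_aligned.set(old); // restore, don't reset to true
        t
    }
}

fn main() {
    let mem = Memory { reads_are_aligned: Cell::new(true) };
    mem.read_maybe_aligned(false, |m| {
        // A nested aligned call cannot re-enable checking that an
        // outer unaligned context disabled.
        m.read_maybe_aligned(true, |inner| {
            assert!(!inner.reads_are_aligned.get());
        });
    });
    assert!(mem.reads_are_aligned.get()); // outer value restored
}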

src/librustc_mir/interpret/step.rs (7 additions, 5 deletions)

@@ -11,12 +11,14 @@ use rustc::ty;
 use rustc::ty::layout::Layout;
 use rustc::ty::subst::Substs;

+use syntax::codemap::Span;
+use syntax::ast::Mutability;
+
 use error::{EvalResult, EvalError};
-use eval_context::{EvalContext, StackPopCleanup};
+use eval_context::{EvalContext, StackPopCleanup, TyAndPacked};
 use lvalue::{Global, GlobalId, Lvalue};
 use value::{Value, PrimVal};
-use syntax::codemap::Span;
-use syntax::ast::Mutability;
+use memory::HasMemory;

 impl<'a, 'tcx> EvalContext<'a, 'tcx> {
     pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> {
@@ -101,12 +103,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             Layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
                 if variant_index as u64 != nndiscr {
-                    let (offset, ty) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
+                    let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
                     let nonnull = self.force_allocation(dest)?.to_ptr()?.offset(offset.bytes(), &self)?;
                     trace!("struct wrapped nullable pointer type: {}", ty);
                     // only the pointer part of a fat pointer is used for this space optimization
                     let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield");
-                    self.memory.write_uint(nonnull, 0, discr_size)?;
+                    self.write_maybe_aligned_mut(!packed, |ectx| ectx.memory.write_uint(nonnull, 0, discr_size))?;
                 }
             },
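
The step.rs change threads the new packed flag from field-offset computation down to the discriminant write: nonnull_offset_and_ty now returns a TyAndPacked (imported from eval_context above), and when the niche field lives in a packed struct the zero-write goes through write_maybe_aligned_mut(!packed, ...). A hedged sketch of that flow with made-up types and values (the real TyAndPacked wraps rustc's Ty<'tcx> and is not generic):

// Hypothetical mirror of TyAndPacked: a field's type plus whether it
// sits inside a #[repr(packed)] struct and may be under-aligned.
struct TyAndPacked<T> {
    ty: T,
    packed: bool,
}

// Stand-in for nonnull_offset_and_ty: the niche field's byte offset
// and type info. The values here are illustrative only.
fn nonnull_offset_and_ty() -> (u64, TyAndPacked<&'static str>) {
    (0, TyAndPacked { ty: "*const u8", packed: true })
}

fn main() {
    let (offset, TyAndPacked { ty, packed }) = nonnull_offset_and_ty();
    // Mirrors write_maybe_aligned_mut(!packed, ...) in the diff:
    // alignment is checked exactly when the field is not packed.
    let check_alignment = !packed;
    println!("zeroing {ty} at offset {offset}, alignment checked: {check_alignment}");
}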

src/librustc_mir/interpret/terminator/intrinsic.rs (22 additions, 22 deletions)

@@ -45,7 +45,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             "arith_offset" => {
                 let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
                 self.write_ptr(dest, result_ptr, dest_ty)?;
             }
@@ -61,7 +61,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             "atomic_load_acq" |
             "volatile_load" => {
                 let ty = substs.type_at(0);
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 self.write_value(Value::by_ref(ptr), dest, ty)?;
             }

@@ -70,7 +70,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             "atomic_store_rel" |
             "volatile_store" => {
                 let ty = substs.type_at(0);
-                let dest = arg_vals[0].into_ptr(&mut self.memory)?;
+                let dest = arg_vals[0].into_ptr(&self.memory)?;
                 self.write_value_to_ptr(arg_vals[1], dest, ty)?;
             }

@@ -80,12 +80,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             _ if intrinsic_name.starts_with("atomic_xchg") => {
                 let ty = substs.type_at(0);
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 let change = self.value_to_primval(arg_vals[1], ty)?;
                 let old = self.read_value(ptr, ty)?;
                 let old = match old {
                     Value::ByVal(val) => val,
-                    Value::ByRef(..) => bug!("just read the value, can't be byref"),
+                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                     Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
                 };
                 self.write_primval(dest, old, ty)?;
@@ -94,13 +94,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             _ if intrinsic_name.starts_with("atomic_cxchg") => {
                 let ty = substs.type_at(0);
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 let expect_old = self.value_to_primval(arg_vals[1], ty)?;
                 let change = self.value_to_primval(arg_vals[2], ty)?;
                 let old = self.read_value(ptr, ty)?;
                 let old = match old {
                     Value::ByVal(val) => val,
-                    Value::ByRef(..) => bug!("just read the value, can't be byref"),
+                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                     Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
                 };
                 let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
@@ -115,12 +115,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             "atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" |
             "atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => {
                 let ty = substs.type_at(0);
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 let change = self.value_to_primval(arg_vals[1], ty)?;
                 let old = self.read_value(ptr, ty)?;
                 let old = match old {
                     Value::ByVal(val) => val,
-                    Value::ByRef(..) => bug!("just read the value, can't be byref"),
+                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                     Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"),
                 };
                 self.write_primval(dest, old, ty)?;
@@ -148,8 +148,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                     // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
                     // Also see the write_bytes intrinsic.
                     let elem_align = self.type_align(elem_ty)?;
-                    let src = arg_vals[0].into_ptr(&mut self.memory)?;
-                    let dest = arg_vals[1].into_ptr(&mut self.memory)?;
+                    let src = arg_vals[0].into_ptr(&self.memory)?;
+                    let dest = arg_vals[1].into_ptr(&self.memory)?;
                     self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
                 }
             }
@@ -176,7 +176,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             "discriminant_value" => {
                 let ty = substs.type_at(0);
-                let adt_ptr = arg_vals[0].into_ptr(&mut self.memory)?.to_ptr()?;
+                let adt_ptr = arg_vals[0].into_ptr(&self.memory)?.to_ptr()?;
                 let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
                 self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
             }
@@ -251,10 +251,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let size = self.type_size(dest_ty)?.expect("cannot zero unsized value");
                 let init = |this: &mut Self, val: Value| {
                     let zero_val = match val {
-                        Value::ByRef(ptr, aligned) => {
+                        Value::ByRef { ptr, aligned } => {
                            // These writes have no alignment restriction anyway.
                            this.memory.write_repeat(ptr, 0, size)?;
-                            Value::ByRef(ptr, aligned)
+                            Value::ByRef { ptr, aligned }
                        },
                        // TODO(solson): Revisit this, it's fishy to check for Undef here.
                        Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) {
@@ -297,7 +297,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             "move_val_init" => {
                 let ty = substs.type_at(0);
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 self.write_value_to_ptr(arg_vals[1], ptr, ty)?;
             }

@@ -310,7 +310,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

             "offset" => {
                 let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
                 self.write_ptr(dest, result_ptr, dest_ty)?;
             }
@@ -399,7 +399,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             "transmute" => {
                 let src_ty = substs.type_at(0);
                 let ptr = self.force_allocation(dest)?.to_ptr()?;
-                self.write_maybe_aligned(/*aligned*/false, |ectx| {
+                self.write_maybe_aligned_mut(/*aligned*/false, |ectx| {
                     ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)
                 })?;
             }
@@ -442,9 +442,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let size = dest_layout.size(&self.tcx.data_layout).bytes();
                 let uninit = |this: &mut Self, val: Value| {
                     match val {
-                        Value::ByRef(ptr, aligned) => {
+                        Value::ByRef { ptr, aligned } => {
                            this.memory.mark_definedness(ptr, size, false)?;
-                            Ok(Value::ByRef(ptr, aligned))
+                            Ok(Value::ByRef { ptr, aligned })
                        },
                        _ => Ok(Value::ByVal(PrimVal::Undef)),
                    }
@@ -464,7 +464,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let ty_align = self.type_align(ty)?;
                 let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8;
                 let size = self.type_size(ty)?.expect("write_bytes() type must be sized");
-                let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
+                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                 let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                 if count > 0 {
                     // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
@@ -550,15 +550,15 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 Ok((size, align.abi()))
             }
             ty::TyDynamic(..) => {
-                let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?;
+                let (_, vtable) = value.into_ptr_vtable_pair(&self.memory)?;
                 // the second entry in the vtable is the dynamic size of the object.
                 self.read_size_and_align_from_vtable(vtable)
             }

             ty::TySlice(_) | ty::TyStr => {
                 let elem_ty = ty.sequence_element_type(self.tcx);
                 let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64;
-                let (_, len) = value.into_slice(&mut self.memory)?;
+                let (_, len) = value.into_slice(&self.memory)?;
                 let align = self.type_align(elem_ty)?;
                 Ok((len * elem_size, align as u64))
             }
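
The last hunk is part of the size_of_val/min_align_of_val logic: for a trait object the dynamic size and alignment are read out of the vtable, while for a slice or str the size is the element count times the element size. A minimal sketch of that computation with a stand-in metadata type (the real code reads the size and alignment entries out of the vtable allocation in interpreter memory):

// Stand-in for fat-pointer metadata as this hunk consumes it.
enum FatMeta {
    // For dyn Trait, the vtable stores the concrete type's size/align.
    Vtable { size: u64, align: u64 },
    // For [T] / str, the metadata is the element count.
    Length(u64),
}

fn size_and_align(meta: &FatMeta, elem_size: u64, elem_align: u64) -> (u64, u64) {
    match meta {
        FatMeta::Vtable { size, align } => (*size, *align),
        FatMeta::Length(len) => (len * elem_size, elem_align),
    }
}

fn main() {
    // A &[u32] of length 5: 20 bytes, align 4.
    assert_eq!(size_and_align(&FatMeta::Length(5), 4, 4), (20, 4));
}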
