
Commit b0e46f0

Committed Feb 9, 2017
Auto merge of #39586 - arielb1:packed-values, r=eddyb
emit "align 1" metadata on loads/stores of packed structs According to the LLVM reference: > A value of 0 or an omitted align argument means that the operation has the ABI alignment for the target. So loads/stores of fields of packed structs need to have their align set to 1. Implement that by tracking the alignment of `LvalueRef`s. Fixes #39376. r? @eddyb
2 parents fd2f8a4 + d71988a commit b0e46f0
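For readers skimming the diff, here is a minimal, hypothetical Rust example of the situation the commit message describes (the struct and function names are illustrative, not taken from this PR's tests). A field of a `#[repr(packed)]` struct can sit at an unaligned offset, so the load emitted for it must carry `align 1` instead of the field type's ABI alignment:

```rust
#[repr(packed)]
pub struct Packed {
    tag: u8,
    // `value` lives at offset 1, so assuming its ABI alignment of 4
    // when loading it would be undefined behaviour at the LLVM level.
    value: u32,
}

pub fn read_value(p: &Packed) -> u32 {
    // With this change, the field load is emitted with `align 1`
    // rather than the ABI alignment of `u32`.
    p.value
}
```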

File tree

15 files changed: +388, -267 lines


‎src/librustc_trans/adt.rs

Lines changed: 16 additions & 9 deletions
@@ -56,6 +56,8 @@ use monomorphize;
 use type_::Type;
 use type_of;
 
+use mir::lvalue::Alignment;
+
 /// Given an enum, struct, closure, or tuple, extracts fields.
 /// Treats closures as a struct with one variant.
 /// `empty_if_no_variants` is a switch to deal with empty enums.
@@ -279,6 +281,7 @@ pub fn trans_get_discr<'a, 'tcx>(
     bcx: &Builder<'a, 'tcx>,
     t: Ty<'tcx>,
     scrutinee: ValueRef,
+    alignment: Alignment,
     cast_to: Option<Type>,
     range_assert: bool
 ) -> ValueRef {
@@ -292,11 +295,12 @@ pub fn trans_get_discr<'a, 'tcx>(
 
     let val = match *l {
         layout::CEnum { discr, min, max, .. } => {
-            load_discr(bcx, discr, scrutinee, min, max, range_assert)
+            load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert)
         }
         layout::General { discr, .. } => {
             let ptr = bcx.struct_gep(scrutinee, 0);
-            load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1,
+            load_discr(bcx, discr, ptr, alignment,
+                       0, def.variants.len() as u64 - 1,
                        range_assert)
         }
         layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
@@ -305,10 +309,10 @@ pub fn trans_get_discr<'a, 'tcx>(
             let llptrty = type_of::sizing_type_of(bcx.ccx,
                 monomorphize::field_ty(bcx.tcx(), substs,
                     &def.variants[nndiscr as usize].fields[0]));
-            bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
+            bcx.icmp(cmp, bcx.load(scrutinee, alignment.to_align()), C_null(llptrty))
         }
         layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
-            struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
+            struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment)
         },
         _ => bug!("{} is not an enum", t)
     };
@@ -322,17 +326,19 @@ fn struct_wrapped_nullable_bitdiscr(
     bcx: &Builder,
     nndiscr: u64,
     discrfield: &layout::FieldPath,
-    scrutinee: ValueRef
+    scrutinee: ValueRef,
+    alignment: Alignment,
 ) -> ValueRef {
     let llptrptr = bcx.gepi(scrutinee,
         &discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>()[..]);
-    let llptr = bcx.load(llptrptr);
+    let llptr = bcx.load(llptrptr, alignment.to_align());
     let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
     bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
 }
 
 /// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
+fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
+              alignment: Alignment, min: u64, max: u64,
               range_assert: bool)
               -> ValueRef {
     let llty = Type::from_integer(bcx.ccx, ity);
@@ -348,11 +354,12 @@ fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max:
         // rejected by the LLVM verifier (it would mean either an
         // empty set, which is impossible, or the entire range of the
         // type, which is pointless).
-        bcx.load(ptr)
+        bcx.load(ptr, alignment.to_align())
     } else {
         // llvm::ConstantRange can deal with ranges that wrap around,
         // so an overflow on (max + 1) is fine.
-        bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True)
+        bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True,
+                              alignment.to_align())
     }
 }

‎src/librustc_trans/asm.rs

Lines changed: 3 additions & 1 deletion
@@ -20,6 +20,8 @@ use builder::Builder;
 use rustc::hir;
 use rustc::ty::Ty;
 
+use mir::lvalue::Alignment;
+
 use std::ffi::CString;
 use syntax::ast::AsmDialect;
 use libc::{c_uint, c_char};
@@ -38,7 +40,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
     let mut indirect_outputs = vec![];
     for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
         let val = if out.is_rw || out.is_indirect {
-            Some(base::load_ty(bcx, val, ty))
+            Some(base::load_ty(bcx, val, Alignment::Packed, ty))
        } else {
            None
        };

‎src/librustc_trans/base.rs

Lines changed: 41 additions & 36 deletions
@@ -90,6 +90,8 @@ use rustc::hir;
 use rustc::ty::layout::{self, Layout};
 use syntax::ast;
 
+use mir::lvalue::Alignment;
+
 pub struct StatRecorder<'a, 'tcx: 'a> {
     ccx: &'a CrateContext<'a, 'tcx>,
     name: Option<String>,
@@ -250,25 +252,25 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
 pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
-                                     src: ValueRef,
-                                     src_ty: Ty<'tcx>,
-                                     dst: ValueRef,
-                                     dst_ty: Ty<'tcx>) {
+                                     src: &LvalueRef<'tcx>,
+                                     dst: &LvalueRef<'tcx>) {
+    let src_ty = src.ty.to_ty(bcx.tcx());
+    let dst_ty = dst.ty.to_ty(bcx.tcx());
     let coerce_ptr = || {
         let (base, info) = if common::type_is_fat_ptr(bcx.ccx, src_ty) {
             // fat-ptr to fat-ptr unsize preserves the vtable
             // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
             // So we need to pointercast the base to ensure
             // the types match up.
-            let (base, info) = load_fat_ptr(bcx, src, src_ty);
+            let (base, info) = load_fat_ptr(bcx, src.llval, src.alignment, src_ty);
             let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty);
             let base = bcx.pointercast(base, llcast_ty);
             (base, info)
         } else {
-            let base = load_ty(bcx, src, src_ty);
+            let base = load_ty(bcx, src.llval, src.alignment, src_ty);
             unsize_thin_ptr(bcx, base, src_ty, dst_ty)
         };
-        store_fat_ptr(bcx, base, info, dst, dst_ty);
+        store_fat_ptr(bcx, base, info, dst.llval, dst.alignment, dst_ty);
     };
     match (&src_ty.sty, &dst_ty.sty) {
         (&ty::TyRef(..), &ty::TyRef(..)) |
@@ -290,21 +292,22 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 monomorphize::field_ty(bcx.tcx(), substs_b, f)
             });
 
-            let src = LvalueRef::new_sized_ty(src, src_ty);
-            let dst = LvalueRef::new_sized_ty(dst, dst_ty);
-
             let iter = src_fields.zip(dst_fields).enumerate();
             for (i, (src_fty, dst_fty)) in iter {
                 if type_is_zero_size(bcx.ccx, dst_fty) {
                     continue;
                 }
 
-                let src_f = src.trans_field_ptr(bcx, i);
-                let dst_f = dst.trans_field_ptr(bcx, i);
+                let (src_f, src_f_align) = src.trans_field_ptr(bcx, i);
+                let (dst_f, dst_f_align) = dst.trans_field_ptr(bcx, i);
                 if src_fty == dst_fty {
                     memcpy_ty(bcx, dst_f, src_f, src_fty, None);
                 } else {
-                    coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
+                    coerce_unsized_into(
+                        bcx,
+                        &LvalueRef::new_sized_ty(src_f, src_fty, src_f_align),
+                        &LvalueRef::new_sized_ty(dst_f, dst_fty, dst_f_align)
+                    );
                 }
             }
         }
@@ -399,7 +402,8 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
 /// Helper for loading values from memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values. Also handles various special cases where the type
 /// gives us better information about what we are loading.
-pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
+pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef,
+                         alignment: Alignment, t: Ty<'tcx>) -> ValueRef {
     let ccx = b.ccx;
     if type_is_zero_size(ccx, t) {
         return C_undef(type_of::type_of(ccx, t));
@@ -419,54 +423,57 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V
     }
 
     if t.is_bool() {
-        b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False), Type::i1(ccx))
+        b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False, alignment.to_align()),
+                Type::i1(ccx))
     } else if t.is_char() {
         // a char is a Unicode codepoint, and so takes values from 0
         // to 0x10FFFF inclusive only.
-        b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False)
+        b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align())
     } else if (t.is_region_ptr() || t.is_box()) && !common::type_is_fat_ptr(ccx, t) {
-        b.load_nonnull(ptr)
+        b.load_nonnull(ptr, alignment.to_align())
     } else {
-        b.load(ptr)
+        b.load(ptr, alignment.to_align())
     }
 }
 
 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values.
-pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
+pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef,
+                          dst_align: Alignment, t: Ty<'tcx>) {
     debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
 
     if common::type_is_fat_ptr(cx.ccx, t) {
         let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR);
         let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
-        store_fat_ptr(cx, lladdr, llextra, dst, t);
+        store_fat_ptr(cx, lladdr, llextra, dst, dst_align, t);
     } else {
-        cx.store(from_immediate(cx, v), dst, None);
+        cx.store(from_immediate(cx, v), dst, dst_align.to_align());
     }
 }
 
 pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
                                data: ValueRef,
                                extra: ValueRef,
                                dst: ValueRef,
+                               dst_align: Alignment,
                                _ty: Ty<'tcx>) {
     // FIXME: emit metadata
-    cx.store(data, get_dataptr(cx, dst), None);
-    cx.store(extra, get_meta(cx, dst), None);
+    cx.store(data, get_dataptr(cx, dst), dst_align.to_align());
+    cx.store(extra, get_meta(cx, dst), dst_align.to_align());
 }
 
 pub fn load_fat_ptr<'a, 'tcx>(
-    b: &Builder<'a, 'tcx>, src: ValueRef, t: Ty<'tcx>
+    b: &Builder<'a, 'tcx>, src: ValueRef, alignment: Alignment, t: Ty<'tcx>
 ) -> (ValueRef, ValueRef) {
     let ptr = get_dataptr(b, src);
     let ptr = if t.is_region_ptr() || t.is_box() {
-        b.load_nonnull(ptr)
+        b.load_nonnull(ptr, alignment.to_align())
     } else {
-        b.load(ptr)
+        b.load(ptr, alignment.to_align())
     };
 
     // FIXME: emit metadata on `meta`.
-    let meta = b.load(get_meta(b, src));
+    let meta = b.load(get_meta(b, src), alignment.to_align());
 
     (ptr, meta)
 }
@@ -633,7 +640,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
             bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
         };
         // Can return unsized value
-        let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output());
+        let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output(), Alignment::AbiAligned);
         dest_val.ty = LvalueTy::Downcast {
             adt_def: sig.output().ty_adt_def().unwrap(),
             substs: substs,
@@ -642,7 +649,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
         let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
         let mut arg_idx = 0;
         for (i, arg_ty) in sig.inputs().iter().enumerate() {
-            let lldestptr = dest_val.trans_field_ptr(&bcx, i);
+            let (lldestptr, _) = dest_val.trans_field_ptr(&bcx, i);
             let arg = &fn_ty.args[arg_idx];
             arg_idx += 1;
             if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
@@ -662,14 +669,12 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
        }
 
        if let Some(cast_ty) = fn_ty.ret.cast {
-            let load = bcx.load(bcx.pointercast(dest, cast_ty.ptr_to()));
-            let llalign = llalign_of_min(ccx, fn_ty.ret.ty);
-            unsafe {
-                llvm::LLVMSetAlignment(load, llalign);
-            }
-            bcx.ret(load)
+            bcx.ret(bcx.load(
+                bcx.pointercast(dest, cast_ty.ptr_to()),
+                Some(llalign_of_min(ccx, fn_ty.ret.ty))
+            ));
        } else {
-            bcx.ret(bcx.load(dest))
+            bcx.ret(bcx.load(dest, None))
        }
    } else {
        bcx.ret_void();

‎src/librustc_trans/builder.rs

Lines changed: 12 additions & 13 deletions
@@ -19,9 +19,8 @@ use machine::llalign_of_pref;
 use type_::Type;
 use value::Value;
 use libc::{c_uint, c_char};
-use rustc::ty::{Ty, TyCtxt, TypeFoldable};
+use rustc::ty::TyCtxt;
 use rustc::session::Session;
-use type_of;
 
 use std::borrow::Cow;
 use std::ffi::CString;
@@ -486,11 +485,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         builder.dynamic_alloca(ty, name)
     }
 
-    pub fn alloca_ty(&self, ty: Ty<'tcx>, name: &str) -> ValueRef {
-        assert!(!ty.has_param_types());
-        self.alloca(type_of::type_of(self.ccx, ty), name)
-    }
-
     pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
         self.count_insn("alloca");
         unsafe {
@@ -511,10 +505,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    pub fn load(&self, ptr: ValueRef) -> ValueRef {
+    pub fn load(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef {
         self.count_insn("load");
         unsafe {
-            llvm::LLVMBuildLoad(self.llbuilder, ptr, noname())
+            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
+            if let Some(align) = align {
+                llvm::LLVMSetAlignment(load, align as c_uint);
+            }
+            load
         }
     }
 
@@ -539,8 +537,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 
 
     pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
-                             hi: u64, signed: llvm::Bool) -> ValueRef {
-        let value = self.load(ptr);
+                             hi: u64, signed: llvm::Bool,
+                             align: Option<u32>) -> ValueRef {
+        let value = self.load(ptr, align);
 
         unsafe {
             let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr));
@@ -558,8 +557,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         value
     }
 
-    pub fn load_nonnull(&self, ptr: ValueRef) -> ValueRef {
-        let value = self.load(ptr);
+    pub fn load_nonnull(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef {
+        let value = self.load(ptr, align);
         unsafe {
             llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint,
                                   llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));

‎src/librustc_trans/callee.rs

Lines changed: 11 additions & 11 deletions
@@ -41,6 +41,8 @@ use std::iter;
 
 use syntax_pos::DUMMY_SP;
 
+use mir::lvalue::Alignment;
+
 #[derive(Debug)]
 pub enum CalleeData {
     /// Constructor for enum variant/tuple-like-struct.
@@ -358,29 +360,27 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
     let self_idx = fn_ty.ret.is_indirect() as usize;
     let env_arg = &orig_fn_ty.args[0];
-    let llenv = if env_arg.is_indirect() {
-        llargs[self_idx]
+    let env = if env_arg.is_indirect() {
+        LvalueRef::new_sized_ty(llargs[self_idx], closure_ty, Alignment::AbiAligned)
     } else {
-        let scratch = bcx.alloca_ty(closure_ty, "self");
+        let scratch = LvalueRef::alloca(&bcx, closure_ty, "self");
         let mut llarg_idx = self_idx;
-        env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
+        env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch.llval);
         scratch
     };
 
-    debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
+    debug!("trans_fn_once_adapter_shim: env={:?}", env);
     // Adjust llargs such that llargs[self_idx..] has the call arguments.
     // For zero-sized closures that means sneaking in a new argument.
     if env_arg.is_ignore() {
-        llargs.insert(self_idx, llenv);
+        llargs.insert(self_idx, env.llval);
     } else {
-        llargs[self_idx] = llenv;
+        llargs[self_idx] = env.llval;
     }
 
     // Call the by-ref closure body with `self` in a cleanup scope,
     // to drop `self` when the body returns, or in case it unwinds.
-    let self_scope = CleanupScope::schedule_drop_mem(
-        &bcx, LvalueRef::new_sized_ty(llenv, closure_ty)
-    );
+    let self_scope = CleanupScope::schedule_drop_mem(&bcx, env);
 
     let llfn = callee.reify(bcx.ccx);
     let llret;
@@ -512,7 +512,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
     let llfnpointer = llfnpointer.unwrap_or_else(|| {
         // the first argument (`self`) will be ptr to the fn pointer
         if is_by_ref {
-            bcx.load(self_arg)
+            bcx.load(self_arg, None)
        } else {
            self_arg
        }

‎src/librustc_trans/glue.rs

Lines changed: 27 additions & 26 deletions
@@ -13,7 +13,6 @@
 // Code relating to drop glue.
 
 use std;
-use std::ptr;
 use std::iter;
 
 use llvm;
@@ -41,6 +40,7 @@ use Disr;
 use builder::Builder;
 
 use syntax_pos::DUMMY_SP;
+use mir::lvalue::Alignment;
 
 pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) {
     let content_ty = ptr.ty.to_ty(bcx.tcx());
@@ -199,9 +199,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
 
     let value = get_param(llfn, 0);
     let ptr = if ccx.shared().type_is_sized(t) {
-        LvalueRef::new_sized_ty(value, t)
+        LvalueRef::new_sized_ty(value, t, Alignment::AbiAligned)
     } else {
-        LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t)
+        LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t, Alignment::AbiAligned)
     };
 
     let skip_dtor = match g {
@@ -216,11 +216,13 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
             assert!(!skip_dtor);
             let content_ty = t.boxed_ty();
             let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) {
-                let llbox = bcx.load(get_dataptr(&bcx, ptr.llval));
-                let info = bcx.load(get_meta(&bcx, ptr.llval));
-                LvalueRef::new_unsized_ty(llbox, info, content_ty)
+                let llbox = bcx.load(get_dataptr(&bcx, ptr.llval), None);
+                let info = bcx.load(get_meta(&bcx, ptr.llval), None);
+                LvalueRef::new_unsized_ty(llbox, info, content_ty, Alignment::AbiAligned)
             } else {
-                LvalueRef::new_sized_ty(bcx.load(ptr.llval), content_ty)
+                LvalueRef::new_sized_ty(
+                    bcx.load(ptr.llval, None),
+                    content_ty, Alignment::AbiAligned)
             };
             drop_ty(&bcx, ptr);
             trans_exchange_free_ty(&bcx, ptr);
@@ -231,7 +233,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
             // versus without calling Drop::drop. Assert caller is
             // okay with always calling the Drop impl, if any.
             assert!(!skip_dtor);
-            let dtor = bcx.load(ptr.llextra);
+            let dtor = bcx.load(ptr.llextra, None);
             bcx.call(dtor, &[ptr.llval], None);
             bcx
         }
@@ -384,7 +386,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
             let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to());
             let size_ptr = bcx.gepi(info, &[1]);
             let align_ptr = bcx.gepi(info, &[2]);
-            (bcx.load(size_ptr), bcx.load(align_ptr))
+            (bcx.load(size_ptr, None), bcx.load(align_ptr, None))
         }
         ty::TySlice(_) | ty::TyStr => {
             let unit_ty = t.sequence_element_type(bcx.tcx());
@@ -416,8 +418,8 @@ fn drop_structural_ty<'a, 'tcx>(
         let tcx = cx.tcx();
         for (i, field) in variant.fields.iter().enumerate() {
             let arg = monomorphize::field_ty(tcx, substs, field);
-            let field_ptr = av.trans_field_ptr(&cx, i);
-            drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg));
+            let (field_ptr, align) = av.trans_field_ptr(&cx, i);
+            drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg, align));
         }
     }
 
@@ -426,38 +428,38 @@ fn drop_structural_ty<'a, 'tcx>(
     match t.sty {
         ty::TyClosure(def_id, substs) => {
             for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
-                let llupvar = ptr.trans_field_ptr(&cx, i);
-                drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty));
+                let (llupvar, align) = ptr.trans_field_ptr(&cx, i);
+                drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty, align));
             }
         }
         ty::TyArray(_, n) => {
             let base = get_dataptr(&cx, ptr.llval);
             let len = C_uint(cx.ccx, n);
             let unit_ty = t.sequence_element_type(cx.tcx());
             cx = tvec::slice_for_each(&cx, base, unit_ty, len,
-                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
+                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
         }
         ty::TySlice(_) | ty::TyStr => {
             let unit_ty = t.sequence_element_type(cx.tcx());
             cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
-                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
+                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
         }
         ty::TyTuple(ref args, _) => {
             for (i, arg) in args.iter().enumerate() {
-                let llfld_a = ptr.trans_field_ptr(&cx, i);
-                drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg));
+                let (llfld_a, align) = ptr.trans_field_ptr(&cx, i);
+                drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg, align));
             }
         }
         ty::TyAdt(adt, substs) => match adt.adt_kind() {
             AdtKind::Struct => {
                 for (i, field) in adt.variants[0].fields.iter().enumerate() {
                     let field_ty = monomorphize::field_ty(cx.tcx(), substs, field);
-                    let mut field_ptr = ptr.clone();
-                    field_ptr.llval = ptr.trans_field_ptr(&cx, i);
-                    field_ptr.ty = LvalueTy::from_ty(field_ty);
-                    if cx.ccx.shared().type_is_sized(field_ty) {
-                        field_ptr.llextra = ptr::null_mut();
-                    }
+                    let (llval, align) = ptr.trans_field_ptr(&cx, i);
+                    let field_ptr = if cx.ccx.shared().type_is_sized(field_ty) {
+                        LvalueRef::new_sized_ty(llval, field_ty, align)
+                    } else {
+                        LvalueRef::new_unsized_ty(llval, ptr.llextra, field_ty, align)
+                    };
                     drop_ty(&cx, field_ptr);
                 }
             }
@@ -490,9 +492,8 @@ fn drop_structural_ty<'a, 'tcx>(
                     layout::General { .. } |
                     layout::RawNullablePointer { .. } |
                     layout::StructWrappedNullablePointer { .. } => {
-                        let lldiscrim_a = adt::trans_get_discr(&cx, t, ptr.llval, None, false);
-                        let tcx = cx.tcx();
-                        drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize));
+                        let lldiscrim_a = adt::trans_get_discr(
+                            &cx, t, ptr.llval, ptr.alignment, None, false);
 
                         // Create a fall-through basic block for the "else" case of
                        // the switch instruction we're about to generate. Note that

‎src/librustc_trans/intrinsic.rs

Lines changed: 13 additions & 8 deletions
@@ -36,6 +36,8 @@ use syntax_pos::Span;
 use std::cmp::Ordering;
 use std::iter;
 
+use mir::lvalue::Alignment;
+
 fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
     let llvm_name = match name {
         "sqrtf32" => "llvm.sqrt.f32",
@@ -243,7 +245,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
         } else {
             let val = if fn_ty.args[1].is_indirect() {
-                bcx.load(llargs[1])
+                bcx.load(llargs[1], None)
             } else {
                 from_immediate(bcx, llargs[1])
             };
@@ -348,7 +350,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             let val_ty = substs.type_at(0);
             match val_ty.sty {
                 ty::TyAdt(adt, ..) if adt.is_enum() => {
-                    adt::trans_get_discr(bcx, val_ty, llargs[0],
+                    adt::trans_get_discr(bcx, val_ty, llargs[0], Alignment::AbiAligned,
                                          Some(llret_ty), true)
                 }
                 _ => C_null(llret_ty)
@@ -547,8 +549,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                         // destructors, and the contents are SIMD
                         // etc.
                         assert!(!bcx.ccx.shared().type_needs_drop(arg_type));
-                        let arg = LvalueRef::new_sized_ty(llarg, arg_type);
-                        (0..contents.len()).map(|i| bcx.load(arg.trans_field_ptr(bcx, i))).collect()
+                        let arg = LvalueRef::new_sized_ty(llarg, arg_type, Alignment::AbiAligned);
+                        (0..contents.len()).map(|i| {
+                            let (ptr, align) = arg.trans_field_ptr(bcx, i);
+                            bcx.load(ptr, align.to_align())
+                        }).collect()
                     }
                     intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                         let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false));
@@ -624,7 +629,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             let ptr = bcx.pointercast(llresult, ty.ptr_to());
             bcx.store(llval, ptr, Some(type_of::align_of(ccx, ret_ty)));
         } else {
-            store_ty(bcx, llval, llresult, ret_ty);
+            store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty);
         }
     }
 }
@@ -780,10 +785,10 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             None => bug!("msvc_try_filter not defined"),
         };
         let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
-        let addr = catchpad.load(slot);
-        let arg1 = catchpad.load(addr);
+        let addr = catchpad.load(slot, None);
+        let arg1 = catchpad.load(addr, None);
         let val1 = C_i32(ccx, 1);
-        let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]));
+        let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), None);
         let local_ptr = catchpad.bitcast(local_ptr, i64p);
         catchpad.store(arg1, local_ptr, None);
         catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);

‎src/librustc_trans/meth.rs

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
     debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
            vtable_index, Value(llvtable));
 
-    bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]))
+    bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]), None)
 }
 
 /// Generate a shim function that allows an object type like `SomeTrait` to

‎src/librustc_trans/mir/block.rs

Lines changed: 84 additions & 49 deletions
@@ -11,7 +11,7 @@
 use llvm::{self, ValueRef, BasicBlockRef};
 use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err};
 use rustc::middle::lang_items;
-use rustc::ty::{self, layout};
+use rustc::ty::{self, layout, TypeFoldable};
 use rustc::mir;
 use abi::{Abi, FnType, ArgType};
 use adt;
@@ -37,7 +37,7 @@ use std::cmp;
 use super::{MirContext, LocalRef};
 use super::analyze::CleanupKind;
 use super::constant::Const;
-use super::lvalue::LvalueRef;
+use super::lvalue::{Alignment, LvalueRef};
 use super::operand::OperandRef;
 use super::operand::OperandValue::{Pair, Ref, Immediate};
 
@@ -120,7 +120,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             bcx.cleanup_ret(cleanup_pad, None);
         } else {
             let ps = self.get_personality_slot(&bcx);
-            let lp = bcx.load(ps);
+            let lp = bcx.load(ps, None);
             Lifetime::End.call(&bcx, ps);
             if !bcx.sess().target.target.options.custom_unwind_resume {
                 bcx.resume(lp);
@@ -147,7 +147,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
                 let discr_lvalue = self.trans_lvalue(&bcx, discr);
                 let ty = discr_lvalue.ty.to_ty(bcx.tcx());
-                let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);
+                let discr = adt::trans_get_discr(
+                    &bcx, ty, discr_lvalue.llval, discr_lvalue.alignment,
+                    None, true);
 
                 let mut bb_hist = FxHashMap();
                 for target in targets {
@@ -179,7 +181,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
                 let (otherwise, targets) = targets.split_last().unwrap();
-                let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
+                let lv = self.trans_lvalue(&bcx, discr);
+                let discr = bcx.load(lv.llval, lv.alignment.to_align());
                 let discr = base::to_immediate(&bcx, discr, switch_ty);
                 let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
                 for (value, target) in values.iter().zip(targets) {
@@ -202,29 +205,31 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         LocalRef::Operand(None) => bug!("use of return before def"),
                         LocalRef::Lvalue(tr_lvalue) => {
                             OperandRef {
-                                val: Ref(tr_lvalue.llval),
+                                val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
                                 ty: tr_lvalue.ty.to_ty(bcx.tcx())
                             }
                         }
                     };
                     let llslot = match op.val {
                         Immediate(_) | Pair(..) => {
                             let llscratch = bcx.alloca(ret.original_ty, "ret");
-                            self.store_operand(&bcx, llscratch, op, None);
+                            self.store_operand(&bcx, llscratch, None, op);
                             llscratch
                         }
-                        Ref(llval) => llval
+                        Ref(llval, align) => {
+                            assert_eq!(align, Alignment::AbiAligned,
+                                       "return pointer is unaligned!");
+                            llval
+                        }
                     };
-                    let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to()));
-                    let llalign = llalign_of_min(bcx.ccx, ret.ty);
-                    unsafe {
-                        llvm::LLVMSetAlignment(load, llalign);
-                    }
+                    let load = bcx.load(
+                        bcx.pointercast(llslot, cast_ty.ptr_to()),
+                        Some(llalign_of_min(bcx.ccx, ret.ty)));
                     load
                 } else {
                     let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
-                    if let Ref(llval) = op.val {
-                        base::load_ty(&bcx, llval, op.ty)
+                    if let Ref(llval, align) = op.val {
+                        base::load_ty(&bcx, llval, align, op.ty)
                     } else {
                         op.pack_if_pair(&bcx).immediate()
                     }
@@ -425,17 +430,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // The first argument is a thin destination pointer.
                 let llptr = self.trans_operand(&bcx, &args[0]).immediate();
                 let val = self.trans_operand(&bcx, &args[1]);
-                self.store_operand(&bcx, llptr, val, None);
+                self.store_operand(&bcx, llptr, None, val);
                 funclet_br(self, bcx, target);
                 return;
             }
 
             if intrinsic == Some("transmute") {
                 let &(ref dest, target) = destination.as_ref().unwrap();
-                self.with_lvalue_ref(&bcx, dest, |this, dest| {
-                    this.trans_transmute(&bcx, &args[0], dest);
-                });
-
+                self.trans_transmute(&bcx, &args[0], dest);
                 funclet_br(self, bcx, target);
                 return;
             }
@@ -550,7 +552,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
                 // Make a fake operand for store_return
                 let op = OperandRef {
-                    val: Ref(dst),
+                    val: Ref(dst, Alignment::AbiAligned),
                     ty: sig.output(),
                 };
                 self.store_return(&bcx, ret_dest, fn_ty.ret, op);
@@ -652,33 +654,39 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
        }
 
        // Force by-ref if we have to load through a cast pointer.
-        let (mut llval, by_ref) = match op.val {
+        let (mut llval, align, by_ref) = match op.val {
            Immediate(_) | Pair(..) => {
                if arg.is_indirect() || arg.cast.is_some() {
                    let llscratch = bcx.alloca(arg.original_ty, "arg");
-                    self.store_operand(bcx, llscratch, op, None);
-                    (llscratch, true)
+                    self.store_operand(bcx, llscratch, None, op);
+                    (llscratch, Alignment::AbiAligned, true)
                } else {
-                    (op.pack_if_pair(bcx).immediate(), false)
+                    (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false)
                }
            }
-            Ref(llval) => (llval, true)
+            Ref(llval, Alignment::Packed) if arg.is_indirect() => {
+                // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
+                // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
+                // have scary latent bugs around.
+
+                let llscratch = bcx.alloca(arg.original_ty, "arg");
+                base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1));
+                (llscratch, Alignment::AbiAligned, true)
+            }
+            Ref(llval, align) => (llval, align, true)
        };
 
        if by_ref && !arg.is_indirect() {
            // Have to load the argument, maybe while casting it.
            if arg.original_ty == Type::i1(bcx.ccx) {
                // We store bools as i8 so we need to truncate to i1.
-                llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
+                llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None);
                llval = bcx.trunc(llval, arg.original_ty);
            } else if let Some(ty) = arg.cast {
-                llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
-                let llalign = llalign_of_min(bcx.ccx, arg.ty);
-                unsafe {
-                    llvm::LLVMSetAlignment(llval, llalign);
-                }
+                llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()),
+                                 align.min_with(llalign_of_min(bcx.ccx, arg.ty)));
            } else {
-                llval = bcx.load(llval);
+                llval = bcx.load(llval, align.to_align());
            }
        }
 
@@ -702,16 +710,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
        // Handle both by-ref and immediate tuples.
        match tuple.val {
-            Ref(llval) => {
+            Ref(llval, align) => {
                for (n, &ty) in arg_types.iter().enumerate() {
-                    let ptr = LvalueRef::new_sized_ty(llval, tuple.ty);
-                    let ptr = ptr.trans_field_ptr(bcx, n);
+                    let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align);
+                    let (ptr, align) = ptr.trans_field_ptr(bcx, n);
                    let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
-                        let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty);
+                        let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty);
                        Pair(lldata, llextra)
                    } else {
                        // trans_argument will load this if it needs to
-                        Ref(ptr)
+                        Ref(ptr, align)
                    };
                    let op = OperandRef {
                        val: val,
@@ -839,15 +847,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
            return if fn_ret_ty.is_indirect() {
                // Odd, but possible, case, we have an operand temporary,
                // but the calling convention has an indirect return.
-                let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
-                llargs.push(tmp);
-                ReturnDest::IndirectOperand(tmp, index)
+                let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
+                llargs.push(tmp.llval);
+                ReturnDest::IndirectOperand(tmp.llval, index)
            } else if is_intrinsic {
                // Currently, intrinsics always need a location to store
                // the result. so we create a temporary alloca for the
                // result
-                let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
-                ReturnDest::IndirectOperand(tmp, index)
+                let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
+                ReturnDest::IndirectOperand(tmp.llval, index)
            } else {
                ReturnDest::DirectOperand(index)
            };
@@ -868,7 +876,34 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
    }
 
    fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>,
-                       src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
+                       src: &mir::Operand<'tcx>,
+                       dst: &mir::Lvalue<'tcx>) {
+        if let mir::Lvalue::Local(index) = *dst {
+            match self.locals[index] {
+                LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, &lvalue),
+                LocalRef::Operand(None) => {
+                    let lvalue_ty = self.monomorphized_lvalue_ty(dst);
+                    assert!(!lvalue_ty.has_erasable_regions());
+                    let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp");
+                    self.trans_transmute_into(bcx, src, &lvalue);
+                    let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty);
+                    self.locals[index] = LocalRef::Operand(Some(op));
+                }
+                LocalRef::Operand(Some(_)) => {
+                    let ty = self.monomorphized_lvalue_ty(dst);
+                    assert!(common::type_is_zero_size(bcx.ccx, ty),
+                            "assigning to initialized SSAtemp");
+                }
+            }
+        } else {
+            let dst = self.trans_lvalue(bcx, dst);
+            self.trans_transmute_into(bcx, src, &dst);
+        }
+    }
+
+    fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>,
+                            src: &mir::Operand<'tcx>,
+                            dst: &LvalueRef<'tcx>) {
        let mut val = self.trans_operand(bcx, src);
        if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
            let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx()));
@@ -892,7 +927,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
        let in_type = val.ty;
        let out_type = dst.ty.to_ty(bcx.tcx());;
        let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type));
-        self.store_operand(bcx, cast_ptr, val, Some(llalign));
+        self.store_operand(bcx, cast_ptr, Some(llalign), val);
    }
 
 
@@ -908,15 +943,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
            Nothing => (),
            Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
            IndirectOperand(tmp, index) => {
-                let op = self.trans_load(bcx, tmp, op.ty);
+                let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty);
                self.locals[index] = LocalRef::Operand(Some(op));
            }
            DirectOperand(index) => {
                // If there is a cast, we have to store and reload.
                let op = if ret_ty.cast.is_some() {
-                    let tmp = bcx.alloca_ty(op.ty, "tmp_ret");
-                    ret_ty.store(bcx, op.immediate(), tmp);
-                    self.trans_load(bcx, tmp, op.ty)
+                    let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret");
+                    ret_ty.store(bcx, op.immediate(), tmp.llval);
+                    self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty)
                } else {
                    op.unpack_if_pair(bcx)
                };

‎src/librustc_trans/mir/constant.rs

Lines changed: 2 additions & 1 deletion
@@ -40,6 +40,7 @@ use syntax_pos::Span;
 use std::fmt;
 use std::ptr;
 
+use super::lvalue::Alignment;
 use super::operand::{OperandRef, OperandValue};
 use super::MirContext;
 
@@ -140,7 +141,7 @@ impl<'tcx> Const<'tcx> {
             // a constant LLVM global and cast its address if necessary.
             let align = type_of::align_of(ccx, self.ty);
             let ptr = consts::addr_of(ccx, self.llval, align, "const");
-            OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()))
+            OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned)
        };
 
        OperandRef {

‎src/librustc_trans/mir/lvalue.rs

Lines changed: 88 additions & 66 deletions
@@ -15,20 +15,61 @@ use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;
 use adt;
 use builder::Builder;
-use common::{self, CrateContext, C_uint, C_undef};
+use common::{self, CrateContext, C_uint};
 use consts;
 use machine;
-use type_of::type_of;
 use type_of;
 use type_::Type;
 use value::Value;
 use glue;
 
 use std::ptr;
+use std::ops;
 
 use super::{MirContext, LocalRef};
 use super::operand::OperandValue;
 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Alignment {
+    Packed,
+    AbiAligned,
+}
+
+impl ops::BitOr for Alignment {
+    type Output = Self;
+
+    fn bitor(self, rhs: Self) -> Self {
+        match (self, rhs) {
+            (Alignment::Packed, _) => Alignment::Packed,
+            (Alignment::AbiAligned, a) => a,
+        }
+    }
+}
+
+impl Alignment {
+    pub fn from_packed(packed: bool) -> Self {
+        if packed {
+            Alignment::Packed
+        } else {
+            Alignment::AbiAligned
+        }
+    }
+
+    pub fn to_align(self) -> Option<u32> {
+        match self {
+            Alignment::Packed => Some(1),
+            Alignment::AbiAligned => None,
+        }
+    }
+
+    pub fn min_with(self, align: u32) -> Option<u32> {
+        match self {
+            Alignment::Packed => Some(1),
+            Alignment::AbiAligned => Some(align),
+        }
+    }
+}
+
 #[derive(Copy, Clone, Debug)]
 pub struct LvalueRef<'tcx> {
     /// Pointer to the contents of the lvalue
@@ -39,25 +80,38 @@ pub struct LvalueRef<'tcx> {
 
     /// Monomorphized type of this lvalue, including variant information
     pub ty: LvalueTy<'tcx>,
+
+    /// Whether this lvalue is known to be aligned according to its layout
+    pub alignment: Alignment,
 }
 
 impl<'a, 'tcx> LvalueRef<'tcx> {
-    pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
-        LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
+    pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>,
+                     alignment: Alignment) -> LvalueRef<'tcx> {
+        LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment }
     }
 
-    pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> {
-        LvalueRef::new_sized(llval, LvalueTy::from_ty(ty))
+    pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> {
+        LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment)
     }
 
-    pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> {
+    pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>, alignment: Alignment)
+                          -> LvalueRef<'tcx> {
         LvalueRef {
             llval: llval,
             llextra: llextra,
             ty: LvalueTy::from_ty(ty),
+            alignment: alignment,
         }
     }
 
+    pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
+        debug!("alloca({:?}: {:?})", name, ty);
+        let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
+        assert!(!ty.has_param_types());
+        Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
+    }
+
     pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
         let ty = self.ty.to_ty(ccx.tcx());
         match ty.sty {
@@ -81,10 +135,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
        fields: &Vec<Ty<'tcx>>,
        ix: usize,
        needs_cast: bool
-    ) -> ValueRef {
+    ) -> (ValueRef, Alignment) {
        let fty = fields[ix];
        let ccx = bcx.ccx;
 
+        let alignment = self.alignment | Alignment::from_packed(st.packed);
+
        let ptr_val = if needs_cast {
            let fields = st.field_index_by_increasing_offset().map(|i| {
                type_of::in_memory_type_of(ccx, fields[i])
@@ -101,14 +157,14 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
        // * Field is sized - pointer is properly aligned already
        if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
            bcx.ccx.shared().type_is_sized(fty) {
-            return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
+            return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
        }
 
        // If the type of the last field is [T] or str, then we don't need to do
        // any adjusments
        match fty.sty {
            ty::TySlice(..) | ty::TyStr => {
-                return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
+                return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
            }
            _ => ()
        }
@@ -117,7 +173,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
        if !self.has_extra() {
            debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
                   ix, Value(ptr_val));
-            return bcx.struct_gep(ptr_val, ix);
+            return (bcx.struct_gep(ptr_val, ix), alignment);
        }
 
        // We need to get the pointer manually now.
@@ -163,11 +219,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
        // Finally, cast back to the type expected
        let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-        bcx.pointercast(byte_ptr, ll_fty.ptr_to())
+        (bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment)
    }
 
    /// Access a field, at a point when the value's case is known.
-    pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef {
+    pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) {
        let discr = match self.ty {
            LvalueTy::Ty { .. } => 0,
            LvalueTy::Downcast { variant_index, .. } => variant_index,
@@ -186,17 +242,18 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
            layout::Vector { count, .. } => {
                assert_eq!(discr, 0);
                assert!((ix as u64) < count);
-                bcx.struct_gep(self.llval, ix)
+                (bcx.struct_gep(self.llval, ix), self.alignment)
            }
            layout::General { discr: d, ref variants, .. } => {
                let mut fields = adt::compute_fields(bcx.ccx, t, discr, false);
                fields.insert(0, d.to_ty(&bcx.tcx(), false));
                self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true)
            }
-            layout::UntaggedUnion { .. } => {
+            layout::UntaggedUnion { ref variants } => {
                let fields = adt::compute_fields(bcx.ccx, t, 0, false);
                let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
-                bcx.pointercast(self.llval, ty.ptr_to())
+                (bcx.pointercast(self.llval, ty.ptr_to()),
+                 self.alignment | Alignment::from_packed(variants.packed))
            }
            layout::RawNullablePointer { nndiscr, .. } |
            layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => {
@@ -205,19 +262,19 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
                // (e.d., Result of Either with (), as one side.)
                let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
                assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
-                bcx.pointercast(self.llval, ty.ptr_to())
+                (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed)
            }
            layout::RawNullablePointer { nndiscr, .. } => {
                let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
                assert_eq!(ix, 0);
                assert_eq!(discr as u64, nndiscr);
                let ty = type_of::type_of(bcx.ccx, nnty);
-                bcx.pointercast(self.llval, ty.ptr_to())
+                (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment)
            }
            layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
                assert_eq!(discr as u64, nndiscr);
                self.struct_field_ptr(bcx, &nonnull,
-                      &adt::compute_fields(bcx.ccx, t, discr, false), ix, false)
+                    &adt::compute_fields(bcx.ccx, t, discr, false), ix, false)
            }
            _ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
        }
@@ -250,7 +307,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
            mir::Lvalue::Static(def_id) => {
                let const_ty = self.monomorphized_lvalue_ty(lvalue);
                LvalueRef::new_sized(consts::get_static(ccx, def_id),
-                                     LvalueTy::from_ty(const_ty))
+                                     LvalueTy::from_ty(const_ty),
+                                     Alignment::AbiAligned)
            },
            mir::Lvalue::Projection(box mir::Projection {
                ref base,
@@ -264,18 +322,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                let (llptr, llextra) = match ptr.val {
                    OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
                    OperandValue::Pair(llptr, llextra) => (llptr, llextra),
-                    OperandValue::Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty)
+                    OperandValue::Ref(..) => bug!("Deref of by-Ref type {:?}", ptr.ty)
                };
                LvalueRef {
                    llval: llptr,
                    llextra: llextra,
                    ty: projected_ty,
+                    alignment: Alignment::AbiAligned,
                }
            }
            mir::Lvalue::Projection(ref projection) => {
                let tr_base = self.trans_lvalue(bcx, &projection.base);
                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
                let projected_ty = self.monomorphize(&projected_ty);
+                let align = tr_base.alignment;
 
                let project_index = |llindex| {
                    let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
@@ -285,10 +345,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                        let zero = common::C_uint(bcx.ccx, 0u64);
                        bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
                    };
-                    element
+                    (element, align)
                };
 
-                let (llprojected, llextra) = match projection.elem {
+                let ((llprojected, align), llextra) = match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        let llextra = if self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)) {
@@ -318,7 +378,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let llindex = C_uint(bcx.ccx, from);
-                        let llbase = project_index(llindex);
+                        let (llbase, align) = project_index(llindex);
 
                        let base_ty = tr_base.ty.to_ty(bcx.tcx());
                        match base_ty.sty {
@@ -328,71 +388,33 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                let base_ty = self.monomorphized_lvalue_ty(lvalue);
                                let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to();
                                let llbase = bcx.pointercast(llbase, llbasety);
-                                (llbase, ptr::null_mut())
+                                ((llbase, align), ptr::null_mut())
                            }
                            ty::TySlice(..) => {
                                assert!(tr_base.llextra != ptr::null_mut());
                                let lllen = bcx.sub(tr_base.llextra,
                                                    C_uint(bcx.ccx, from+to));
-                                (llbase, lllen)
+                                ((llbase, align), lllen)
                            }
                            _ => bug!("unexpected type {:?} in Subslice", base_ty)
                        }
                    }
                    mir::ProjectionElem::Downcast(..) => {
-                        (tr_base.llval, tr_base.llextra)
+                        ((tr_base.llval, align), tr_base.llextra)
                    }
                };
                LvalueRef {
                    llval: llprojected,
                    llextra: llextra,
                    ty: projected_ty,
+                    alignment: align,
                }
            }
        };
        debug!("trans_lvalue(lvalue={:?}) => {:?}", lvalue, result);
        result
    }
 
-    // Perform an action using the given Lvalue.
-    // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
-    // is created first, then used as an operand to update the Lvalue.
-    pub fn with_lvalue_ref<F, U>(&mut self, bcx: &Builder<'a, 'tcx>,
-                                 lvalue: &mir::Lvalue<'tcx>, f: F) -> U
-        where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
-    {
-        if let mir::Lvalue::Local(index) = *lvalue {
-            match self.locals[index] {
-                LocalRef::Lvalue(lvalue) => f(self, lvalue),
-                LocalRef::Operand(None) => {
-                    let lvalue_ty = self.monomorphized_lvalue_ty(lvalue);
-                    assert!(!lvalue_ty.has_erasable_regions());
-                    let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp");
-                    let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty));
-                    let ret = f(self, lvalue);
-                    let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
-                    self.locals[index] = LocalRef::Operand(Some(op));
-                    ret
-                }
-                LocalRef::Operand(Some(_)) => {
-                    // See comments in LocalRef::new_operand as to why
-                    // we always have Some in a ZST LocalRef::Operand.
-                    let ty = self.monomorphized_lvalue_ty(lvalue);
-                    if common::type_is_zero_size(bcx.ccx, ty) {
-                        // Pass an undef pointer as no stores can actually occur.
-                        let llptr = C_undef(type_of(bcx.ccx, ty).ptr_to());
-                        f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty)))
-                    } else {
-                        bug!("Lvalue local already set");
-                    }
-                }
-            }
-        } else {
-            let lvalue = self.trans_lvalue(bcx, lvalue);
-            f(self, lvalue)
-        }
-    }
-
    /// Adjust the bitwidth of an index since LLVM is less forgiving
    /// than we are.
    ///

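Aside (not part of the diff): a self-contained sketch, mirroring the `Alignment` type added in lvalue.rs above, of how the alignment values are meant to combine as field projections are walked. Packedness only propagates downward, and `Packed` eventually turns into an explicit `align 1` on the emitted load or store. The `main` function is purely illustrative:

```rust
use std::ops;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
    Packed,
    AbiAligned,
}

impl ops::BitOr for Alignment {
    type Output = Self;

    fn bitor(self, rhs: Self) -> Self {
        // Packed is "sticky": once any enclosing struct is packed, every
        // field reached through it is treated as potentially unaligned.
        match (self, rhs) {
            (Alignment::Packed, _) => Alignment::Packed,
            (Alignment::AbiAligned, a) => a,
        }
    }
}

impl Alignment {
    pub fn to_align(self) -> Option<u32> {
        match self {
            Alignment::Packed => Some(1),   // force `align 1` metadata
            Alignment::AbiAligned => None,  // omit it; LLVM assumes ABI alignment
        }
    }
}

fn main() {
    // Projecting into a packed struct degrades the alignment of every field.
    let field = Alignment::AbiAligned | Alignment::Packed;
    assert_eq!(field.to_align(), Some(1));
    // Fields of ordinary structs keep the ABI alignment (no metadata emitted).
    assert_eq!((Alignment::AbiAligned | Alignment::AbiAligned).to_align(), None);
}
```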
‎src/librustc_trans/mir/mod.rs

Lines changed: 16 additions & 16 deletions
@@ -38,7 +38,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx};
 pub use self::constant::trans_static_initializer;
 
 use self::analyze::CleanupKind;
-use self::lvalue::LvalueRef;
+use self::lvalue::{Alignment, LvalueRef};
 use rustc::mir::traversal;
 
 use self::operand::{OperandRef, OperandValue};
@@ -269,8 +269,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 
                debug!("alloc: {:?} ({}) -> lvalue", local, name);
                assert!(!ty.has_erasable_regions());
-                let lltemp = bcx.alloca_ty(ty, &name.as_str());
-                let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty));
+                let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
                if dbg {
                    let (scope, span) = mircx.debug_loc(source_info);
                    declare_local(&bcx, &mircx.debug_context, name, ty, scope,
@@ -283,12 +282,12 @@ pub fn trans_mir<'a, 'tcx: 'a>(
            if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
                debug!("alloc: {:?} (return pointer) -> lvalue", local);
                let llretptr = llvm::get_param(llfn, 0);
-                LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
+                LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty),
+                                                      Alignment::AbiAligned))
            } else if lvalue_locals.contains(local.index()) {
                debug!("alloc: {:?} -> lvalue", local);
                assert!(!ty.has_erasable_regions());
-                let lltemp = bcx.alloca_ty(ty, &format!("{:?}", local));
-                LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)))
+                LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local)))
            } else {
                // If this is an immediate local, we do not create an
                // alloca in advance. Instead we wait until we see the
@@ -388,9 +387,9 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                _ => bug!("spread argument isn't a tuple?!")
            };
 
-            let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
+            let lvalue = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
            for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
-                let dst = bcx.struct_gep(lltemp, i);
+                let dst = bcx.struct_gep(lvalue.llval, i);
                let arg = &mircx.fn_ty.args[idx];
                idx += 1;
                if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) {
@@ -409,7 +408,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
            // we can create one debuginfo entry for the argument.
            arg_scope.map(|scope| {
                let variable_access = VariableAccess::DirectVariable {
-                    alloca: lltemp
+                    alloca: lvalue.llval
                };
                declare_local(
                    bcx,
@@ -422,7 +421,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                );
            });
 
-            return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty)));
+            return LocalRef::Lvalue(lvalue);
        }
 
        let arg = &mircx.fn_ty.args[idx];
@@ -469,21 +468,21 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
            };
            return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
        } else {
-            let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
+            let lltemp = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
            if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
                // we pass fat pointers as two words, but we want to
                // represent them internally as a pointer to two words,
                // so make an alloca to store them in.
                let meta = &mircx.fn_ty.args[idx];
                idx += 1;
-                arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp));
-                meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp));
+                arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp.llval));
+                meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp.llval));
            } else {
                // otherwise, arg is passed by value, so make a
                // temporary and store it there
-                arg.store_fn_arg(bcx, &mut llarg_idx, lltemp);
+                arg.store_fn_arg(bcx, &mut llarg_idx, lltemp.llval);
            }
-            lltemp
+            lltemp.llval
        };
        arg_scope.map(|scope| {
            // Is this a regular argument?
@@ -573,7 +572,8 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                );
            }
        });
-        LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)))
+        LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty),
+                                              Alignment::AbiAligned))
    }).collect()
 }

‎src/librustc_trans/mir/operand.rs

Lines changed: 26 additions & 15 deletions
@@ -10,6 +10,7 @@
 
 use llvm::ValueRef;
 use rustc::ty::Ty;
+use rustc::ty::layout::Layout;
 use rustc::mir;
 use rustc_data_structures::indexed_vec::Idx;
 
@@ -23,6 +24,7 @@ use type_::Type;
 use std::fmt;
 
 use super::{MirContext, LocalRef};
+use super::lvalue::Alignment;
 
 /// The representation of a Rust value. The enum variant is in fact
 /// uniquely determined by the value's type, but is kept as a
@@ -31,7 +33,7 @@ use super::{MirContext, LocalRef};
 pub enum OperandValue {
     /// A reference to the actual operand. The data is guaranteed
     /// to be valid for the operand's lifetime.
-    Ref(ValueRef),
+    Ref(ValueRef, Alignment),
     /// A single LLVM value.
     Immediate(ValueRef),
     /// A pair of immediate LLVM values. Used by fat pointers too.
@@ -58,9 +60,9 @@ pub struct OperandRef<'tcx> {
 impl<'tcx> fmt::Debug for OperandRef<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self.val {
-            OperandValue::Ref(r) => {
-                write!(f, "OperandRef(Ref({:?}) @ {:?})",
-                       Value(r), self.ty)
+            OperandValue::Ref(r, align) => {
+                write!(f, "OperandRef(Ref({:?}, {:?}) @ {:?})",
+                       Value(r), align, self.ty)
             }
             OperandValue::Immediate(i) => {
                 write!(f, "OperandRef(Immediate({:?}) @ {:?})",
@@ -137,27 +139,33 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_load(&mut self,
                       bcx: &Builder<'a, 'tcx>,
                       llval: ValueRef,
+                      align: Alignment,
                      ty: Ty<'tcx>)
                      -> OperandRef<'tcx>
    {
        debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
 
        let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
-            let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty);
+            let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty);
            OperandValue::Pair(lldata, llextra)
        } else if common::type_is_imm_pair(bcx.ccx, ty) {
+            let f_align = match *bcx.ccx.layout_of(ty) {
+                Layout::Univariant { ref variant, .. } =>
+                    Alignment::from_packed(variant.packed) | align,
+                _ => align
+            };
            let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
            let a_ptr = bcx.struct_gep(llval, 0);
            let b_ptr = bcx.struct_gep(llval, 1);
 
            OperandValue::Pair(
-                base::load_ty(bcx, a_ptr, a_ty),
-                base::load_ty(bcx, b_ptr, b_ty)
+                base::load_ty(bcx, a_ptr, f_align, a_ty),
+                base::load_ty(bcx, b_ptr, f_align, b_ty)
            )
        } else if common::type_is_immediate(bcx.ccx, ty) {
-            OperandValue::Immediate(base::load_ty(bcx, llval, ty))
+            OperandValue::Immediate(base::load_ty(bcx, llval, align, ty))
        } else {
-            OperandValue::Ref(llval)
+            OperandValue::Ref(llval, align)
        };
 
        OperandRef { val: val, ty: ty }
@@ -212,7 +220,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
        // out from their home
        let tr_lvalue = self.trans_lvalue(bcx, lvalue);
        let ty = tr_lvalue.ty.to_ty(bcx.tcx());
-        self.trans_load(bcx, tr_lvalue.llval, ty)
+        self.trans_load(bcx, tr_lvalue.llval, tr_lvalue.alignment, ty)
    }
 
    pub fn trans_operand(&mut self,
@@ -230,9 +238,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
            mir::Operand::Constant(ref constant) => {
                let val = self.trans_constant(bcx, constant);
                let operand = val.to_operand(bcx.ccx);
-                if let OperandValue::Ref(ptr) = operand.val {
+                if let OperandValue::Ref(ptr, align) = operand.val {
                    // If this is a OperandValue::Ref to an immediate constant, load it.
-                    self.trans_load(bcx, ptr, operand.ty)
+                    self.trans_load(bcx, ptr, align, operand.ty)
                } else {
                    operand
                }
@@ -243,16 +251,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
    pub fn store_operand(&mut self,
244252
bcx: &Builder<'a, 'tcx>,
245253
lldest: ValueRef,
246-
operand: OperandRef<'tcx>,
247-
align: Option<u32>) {
254+
align: Option<u32>,
255+
operand: OperandRef<'tcx>) {
248256
debug!("store_operand: operand={:?}, align={:?}", operand, align);
249257
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
250258
// value is through `undef`, and store itself is useless.
251259
if common::type_is_zero_size(bcx.ccx, operand.ty) {
252260
return;
253261
}
254262
match operand.val {
255-
OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty, align),
263+
OperandValue::Ref(r, Alignment::Packed) =>
264+
base::memcpy_ty(bcx, lldest, r, operand.ty, Some(1)),
265+
OperandValue::Ref(r, Alignment::AbiAligned) =>
266+
base::memcpy_ty(bcx, lldest, r, operand.ty, align),
256267
OperandValue::Immediate(s) => {
257268
bcx.store(base::from_immediate(bcx, s), lldest, align);
258269
}
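
The `trans_load` change above covers the immediate-pair case: when the loaded type is a packed `Layout::Univariant`, both halves of the pair must be loaded with `align 1`. A hypothetical source pattern that should exercise this path (`PackedPair` and `read_both` are illustrative names, not part of the commit):

#[repr(packed)]
#[derive(Copy, Clone)]
pub struct PackedPair {
    a: u8,
    b: u32, // byte offset 1, so the usual 4-byte ABI alignment cannot be assumed
}

pub fn read_both(p: &PackedPair) -> (u8, u32) {
    // Copying the whole struct out is expected to be lowered as an
    // immediate-pair load; after this commit the generated loads carry
    // `align 1` instead of claiming the fields' ABI alignment.
    let v = *p;
    (v.a, v.b)
}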

‎src/librustc_trans/mir/rvalue.rs

Lines changed: 19 additions & 15 deletions
@@ -33,7 +33,7 @@ use Disr;
3333
use super::MirContext;
3434
use super::constant::const_scalar_checked_binop;
3535
use super::operand::{OperandRef, OperandValue};
36-
use super::lvalue::{LvalueRef};
36+
use super::lvalue::LvalueRef;
3737

3838
impl<'a, 'tcx> MirContext<'a, 'tcx> {
3939
pub fn trans_rvalue(&mut self,
@@ -50,7 +50,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
5050
let tr_operand = self.trans_operand(&bcx, operand);
5151
// FIXME: consider not copying constants through stack. (fixable by translating
5252
// constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
53-
self.store_operand(&bcx, dest.llval, tr_operand, None);
53+
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
5454
bcx
5555
}
5656

@@ -61,7 +61,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
6161
// into-coerce of a thin pointer to a fat pointer - just
6262
// use the operand path.
6363
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
64-
self.store_operand(&bcx, dest.llval, temp, None);
64+
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
6565
return bcx;
6666
}
6767

@@ -81,13 +81,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
8181
// index into the struct, and this case isn't
8282
// important enough for it.
8383
debug!("trans_rvalue: creating ugly alloca");
84-
let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp");
85-
base::store_ty(&bcx, llval, lltemp, operand.ty);
86-
lltemp
84+
let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
85+
base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
86+
scratch
87+
}
88+
OperandValue::Ref(llref, align) => {
89+
LvalueRef::new_sized_ty(llref, operand.ty, align)
8790
}
88-
OperandValue::Ref(llref) => llref
8991
};
90-
base::coerce_unsized_into(&bcx, llref, operand.ty, dest.llval, cast_ty);
92+
base::coerce_unsized_into(&bcx, &llref, &dest);
9193
bcx
9294
}
9395

@@ -97,7 +99,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
9799
let size = C_uint(bcx.ccx, size);
98100
let base = base::get_dataptr(&bcx, dest.llval);
99101
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
100-
self.store_operand(bcx, llslot, tr_elem, None);
102+
self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
101103
})
102104
}
103105

@@ -111,15 +113,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
111113
let op = self.trans_operand(&bcx, operand);
112114
// Do not generate stores and GEPis for zero-sized fields.
113115
if !common::type_is_zero_size(bcx.ccx, op.ty) {
114-
let mut val = LvalueRef::new_sized(dest.llval, dest.ty);
116+
let mut val = LvalueRef::new_sized(
117+
dest.llval, dest.ty, dest.alignment);
115118
let field_index = active_field_index.unwrap_or(i);
116119
val.ty = LvalueTy::Downcast {
117120
adt_def: adt_def,
118121
substs: self.monomorphize(&substs),
119122
variant_index: disr.0 as usize,
120123
};
121-
let lldest_i = val.trans_field_ptr(&bcx, field_index);
122-
self.store_operand(&bcx, lldest_i, op, None);
124+
let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
125+
self.store_operand(&bcx, lldest_i, align.to_align(), op);
123126
}
124127
}
125128
},
@@ -131,6 +134,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
131134
} else {
132135
None
133136
};
137+
let alignment = dest.alignment;
134138
for (i, operand) in operands.iter().enumerate() {
135139
let op = self.trans_operand(&bcx, operand);
136140
// Do not generate stores and GEPis for zero-sized fields.
@@ -144,7 +148,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
144148
i
145149
};
146150
let dest = bcx.gepi(dest.llval, &[0, i]);
147-
self.store_operand(&bcx, dest, op, None);
151+
self.store_operand(&bcx, dest, alignment.to_align(), op);
148152
}
149153
}
150154
}
@@ -169,7 +173,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
169173
_ => {
170174
assert!(rvalue_creates_operand(rvalue));
171175
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
172-
self.store_operand(&bcx, dest.llval, temp, None);
176+
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
173177
bcx
174178
}
175179
}
@@ -228,7 +232,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
228232
operand.ty, cast_ty);
229233
OperandValue::Pair(lldata, llextra)
230234
}
231-
OperandValue::Ref(_) => {
235+
OperandValue::Ref(..) => {
232236
bug!("by-ref operand {:?} in trans_rvalue_operand",
233237
operand);
234238
}
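
On the store side, the Aggregate arm above now takes the `(pointer, alignment)` pair returned by `trans_field_ptr` and forwards `align.to_align()` into `store_operand`. A hypothetical source pattern this covers (`PackedAgg` and `build` are illustrative names, not taken from the commit):

#[repr(packed)]
pub struct PackedAgg {
    pub tag: u8,
    pub value: u32,
}

pub fn build(v: u32) -> PackedAgg {
    // The struct literal is an Aggregate rvalue; with this change the
    // store of `value` (byte offset 1 inside the packed layout) is
    // emitted with `align 1` rather than the u32 ABI alignment of 4.
    PackedAgg { tag: 1, value: v }
}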

‎src/test/codegen/packed.rs

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
1+
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2+
// file at the top-level directory of this distribution and at
3+
// http://rust-lang.org/COPYRIGHT.
4+
//
5+
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6+
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7+
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8+
// option. This file may not be copied, modified, or distributed
9+
// except according to those terms.
10+
11+
// compile-flags: -C no-prepopulate-passes
12+
13+
#![crate_type = "lib"]
14+
15+
#[repr(packed)]
16+
pub struct Packed {
17+
dealign: u8,
18+
data: u32
19+
}
20+
21+
// CHECK-LABEL: @write_pkd
22+
#[no_mangle]
23+
pub fn write_pkd(pkd: &mut Packed) -> u32 {
24+
// CHECK: %{{.*}} = load i32, i32* %{{.*}}, align 1
25+
// CHECK: store i32 42, i32* %{{.*}}, align 1
26+
let result = pkd.data;
27+
pkd.data = 42;
28+
result
29+
}
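
The CHECK lines above depend on the layout that `#[repr(packed)]` produces: no padding and an overall alignment of 1, which puts `data` at byte offset 1. A standalone sanity check of those layout facts, re-declaring the test's struct (not part of the commit):

use std::mem::{align_of, size_of};

#[repr(packed)]
#[allow(dead_code)]
struct Packed {
    dealign: u8,
    data: u32,
}

fn main() {
    // No padding and an overall alignment of 1: `data` ends up at byte
    // offset 1, so the u32 load/store in the test above cannot assume
    // the type's usual 4-byte alignment.
    assert_eq!(size_of::<Packed>(), 5);
    assert_eq!(align_of::<Packed>(), 1);
}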
