diff --git a/src/librustc/middle/intrinsicck.rs b/src/librustc/middle/intrinsicck.rs
index c9722adc9510c..8dc298b9c2a17 100644
--- a/src/librustc/middle/intrinsicck.rs
+++ b/src/librustc/middle/intrinsicck.rs
@@ -89,7 +89,7 @@ impl<'a, 'gcx, 'tcx> ExprVisitor<'a, 'gcx, 'tcx> {
         let from = unpack_option_like(self.infcx.tcx.global_tcx(), from);
         match (&from.sty, sk_to) {
             (&ty::TyFnDef(..), SizeSkeleton::Known(size_to))
-                if size_to == Pointer.size(&self.infcx.tcx.data_layout) => {
+                if size_to == Pointer.size(self.infcx) => {
                 struct_span_err!(self.infcx.tcx.sess, span, E0591,
                                  "`{}` is zero-sized and can't be transmuted to `{}`",
                                  from, to)
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 571ef30b6b909..54e5de3909086 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -25,6 +25,7 @@ use std::cmp;
 use std::fmt;
 use std::i64;
 use std::iter;
+use std::ops::Deref;
 
 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
 /// for a target, which contains everything needed to compute layouts.
@@ -201,6 +202,16 @@ impl TargetDataLayout {
     }
 }
 
+pub trait HasDataLayout: Copy {
+    fn data_layout(&self) -> &TargetDataLayout;
+}
+
+impl<'a> HasDataLayout for &'a TargetDataLayout {
+    fn data_layout(&self) -> &TargetDataLayout {
+        self
+    }
+}
+
 /// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone)]
 pub enum Endian {
@@ -241,7 +252,9 @@ impl Size {
         Size::from_bytes((self.bytes() + mask) & !mask)
     }
 
-    pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
+    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
+        let dl = cx.data_layout();
+
         // Each Size is less than dl.obj_size_bound(), so the sum is
         // also less than 1 << 62 (and therefore can't overflow).
         let bytes = self.bytes() + offset.bytes();
@@ -253,7 +266,9 @@ impl Size {
         }
     }
 
-    pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
+    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
+        let dl = cx.data_layout();
+
         // Each Size is less than dl.obj_size_bound(), so the sum is
         // also less than 1 << 62 (and therefore can't overflow).
         match self.bytes().checked_mul(count) {
@@ -353,7 +368,9 @@ impl Integer {
         }
     }
 
-    pub fn align(&self, dl: &TargetDataLayout)-> Align {
+    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+        let dl = cx.data_layout();
+
         match *self {
             I1 => dl.i1_align,
             I8 => dl.i8_align,
@@ -407,7 +424,9 @@ impl Integer {
     }
 
     /// Find the smallest integer with the given alignment.
-    pub fn for_abi_align(dl: &TargetDataLayout, align: Align) -> Option<Integer> {
+    pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
+        let dl = cx.data_layout();
+
         let wanted = align.abi();
         for &candidate in &[I8, I16, I32, I64] {
             let ty = Int(candidate);
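Everything below `TargetDataLayout` now takes a generic `cx: C` with `C: HasDataLayout` instead of a literal `&TargetDataLayout`, so any context that knows the data layout can be passed through unchanged. A minimal sketch of the intended call style, using only items from this patch (the helper function itself is hypothetical):

    // Hypothetical helper: accepts &TargetDataLayout, TyCtxt or &InferCtxt,
    // since all of them implement HasDataLayout after this patch.
    fn ptr_sized_int<C: HasDataLayout>(cx: C) -> Option<Integer> {
        // Both calls consult cx.data_layout() internally; C: Copy allows reuse.
        Integer::for_abi_align(cx, Pointer.align(cx))
    }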
@@ -419,7 +438,9 @@
     }
 
     /// Get the Integer type from an attr::IntType.
-    pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer {
+    pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
+        let dl = cx.data_layout();
+
         match ity {
             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
@@ -449,7 +470,7 @@ impl Integer {
         let min_default = I8;
 
         if let Some(ity) = repr.int {
-            let discr = Integer::from_attr(&tcx.data_layout, ity);
+            let discr = Integer::from_attr(tcx, ity);
             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
             if discr < fit {
                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
@@ -490,7 +511,9 @@ pub enum Primitive {
 }
 
 impl Primitive {
-    pub fn size(self, dl: &TargetDataLayout) -> Size {
+    pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
+        let dl = cx.data_layout();
+
         match self {
             Int(I1) | Int(I8) => Size::from_bits(8),
             Int(I16) => Size::from_bits(16),
@@ -501,7 +524,9 @@ impl Primitive {
         }
     }
 
-    pub fn align(self, dl: &TargetDataLayout) -> Align {
+    pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
+        let dl = cx.data_layout();
+
         match self {
             Int(I1) => dl.i1_align,
             Int(I8) => dl.i8_align,
@@ -681,8 +706,8 @@ impl<'a, 'gcx, 'tcx> Struct {
     }
 
     /// Determine whether a structure would be zero-sized, given its fields.
-    pub fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
-                                  -> Result<bool, LayoutError<'gcx>>
+    fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
+                              -> Result<bool, LayoutError<'gcx>>
     where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
         for field in fields {
             let field = field?;
@@ -830,7 +855,7 @@ pub struct Union {
 }
 
 impl<'a, 'gcx, 'tcx> Union {
-    pub fn new(dl: &TargetDataLayout, packed: bool) -> Union {
+    fn new(dl: &TargetDataLayout, packed: bool) -> Union {
         Union {
             align: if packed { dl.i8_align } else { dl.aggregate_align },
             min_size: Size::from_bytes(0),
@@ -839,10 +864,10 @@ impl<'a, 'gcx, 'tcx> Union {
     }
 
     /// Extend the Struct with more fields.
-    pub fn extend<I>(&mut self, dl: &TargetDataLayout,
-                     fields: I,
-                     scapegoat: Ty<'gcx>)
-                     -> Result<(), LayoutError<'gcx>>
+    fn extend<I>(&mut self, dl: &TargetDataLayout,
+                 fields: I,
+                 scapegoat: Ty<'gcx>)
+                 -> Result<(), LayoutError<'gcx>>
     where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
         for (index, field) in fields.enumerate() {
             let field = field?;
@@ -904,7 +929,8 @@ pub enum Layout {
         /// If true, the size is exact, otherwise it's only a lower bound.
         sized: bool,
         align: Align,
-        size: Size
+        element_size: Size,
+        count: u64
     },
 
     /// TyRawPtr or TyRef with a !Sized pointee.
@@ -1087,25 +1113,35 @@ impl<'a, 'gcx, 'tcx> Layout {
             // Arrays and slices.
             ty::TyArray(element, count) => {
                 let element = element.layout(infcx)?;
+                let element_size = element.size(dl);
+                // FIXME(eddyb) Don't use host `usize` for array lengths.
+                let usize_count: usize = count;
+                let count = usize_count as u64;
+                if element_size.checked_mul(count, dl).is_none() {
+                    return Err(LayoutError::SizeOverflow(ty));
+                }
                 Array {
                     sized: true,
                     align: element.align(dl),
-                    size: element.size(dl).checked_mul(count as u64, dl)
-                                  .map_or(Err(LayoutError::SizeOverflow(ty)), Ok)?
+                    element_size: element_size,
+                    count: count
                 }
             }
             ty::TySlice(element) => {
+                let element = element.layout(infcx)?;
                 Array {
                     sized: false,
-                    align: element.layout(infcx)?.align(dl),
-                    size: Size::from_bytes(0)
+                    align: element.align(dl),
+                    element_size: element.size(dl),
+                    count: 0
                 }
             }
             ty::TyStr => {
                 Array {
                     sized: false,
                     align: dl.i8_align,
-                    size: Size::from_bytes(0)
+                    element_size: Size::from_bytes(1),
+                    count: 0
                 }
             }
 
@@ -1440,22 +1476,32 @@ impl<'a, 'gcx, 'tcx> Layout {
         }
     }
 
-    pub fn size(&self, dl: &TargetDataLayout) -> Size {
+    pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
+        let dl = cx.data_layout();
+
         match *self {
             Scalar { value, .. } | RawNullablePointer { value, .. } => {
                 value.size(dl)
             }
 
             Vector { element, count } => {
-                let elem_size = element.size(dl);
-                let vec_size = match elem_size.checked_mul(count, dl) {
+                let element_size = element.size(dl);
+                let vec_size = match element_size.checked_mul(count, dl) {
                     Some(size) => size,
                     None => bug!("Layout::size({:?}): {} * {} overflowed",
-                                 self, elem_size.bytes(), count)
+                                 self, element_size.bytes(), count)
                 };
                 vec_size.abi_align(self.align(dl))
             }
 
+            Array { element_size, count, .. } => {
+                match element_size.checked_mul(count, dl) {
+                    Some(size) => size,
+                    None => bug!("Layout::size({:?}): {} * {} overflowed",
+                                 self, element_size.bytes(), count)
+                }
+            }
+
             FatPointer { metadata, .. } => {
                 // Effectively a (ptr, meta) tuple.
                 Pointer.size(dl).abi_align(metadata.align(dl))
@@ -1464,7 +1510,7 @@ impl<'a, 'gcx, 'tcx> Layout {
             }
 
             CEnum { discr, .. } => Int(discr).size(dl),
-            Array { size, .. } | General { size, .. } => size,
+            General { size, .. } => size,
             UntaggedUnion { ref variants } => variants.stride(),
 
             Univariant { ref variant, .. } |
@@ -1474,7 +1520,9 @@ impl<'a, 'gcx, 'tcx> Layout {
         }
     }
 
-    pub fn align(&self, dl: &TargetDataLayout) -> Align {
+    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+        let dl = cx.data_layout();
+
         match *self {
             Scalar { value, .. } | RawNullablePointer { value, .. } => {
                 value.align(dl)
@@ -1513,6 +1561,61 @@ impl<'a, 'gcx, 'tcx> Layout {
             }
         }
     }
+
+    pub fn field_offset<C: HasDataLayout>(&self,
+                                          cx: C,
+                                          i: usize,
+                                          variant_index: Option<usize>)
+                                          -> Size {
+        let dl = cx.data_layout();
+
+        match *self {
+            Scalar { .. } |
+            CEnum { .. } |
+            UntaggedUnion { .. } |
+            RawNullablePointer { .. } => {
+                Size::from_bytes(0)
+            }
+
+            Vector { element, count } => {
+                let element_size = element.size(dl);
+                let i = i as u64;
+                assert!(i < count);
+                Size::from_bytes(element_size.bytes() * i)
+            }
+
+            Array { element_size, count, .. } => {
+                let i = i as u64;
+                assert!(i < count);
+                Size::from_bytes(element_size.bytes() * i)
+            }
+
+            FatPointer { metadata, .. } => {
+                // Effectively a (ptr, meta) tuple.
+                assert!(i < 2);
+                if i == 0 {
+                    Size::from_bytes(0)
+                } else {
+                    Pointer.size(dl).abi_align(metadata.align(dl))
+                }
+            }
+
+            Univariant { ref variant, .. } => variant.offsets[i],
+
+            General { ref variants, .. } => {
+                let v = variant_index.expect("variant index required");
+                variants[v].offsets[i + 1]
+            }
+
+            StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
+                if Some(nndiscr as usize) == variant_index {
+                    nonnull.offsets[i]
+                } else {
+                    Size::from_bytes(0)
+                }
+            }
+        }
+    }
 }
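With `element_size` and `count` stored separately, array sizes and element offsets both reduce to simple arithmetic: `size` is `element_size * count` and the offset of element `i` is `element_size * i`, as `field_offset` above computes. A sketch of the numbers, assuming a 4-byte element such as `u32` (not tied to any particular target):

    let element_size = Size::from_bytes(4); // e.g. u32
    let count = 8;                          // e.g. [u32; 8]
    assert_eq!(element_size.bytes() * count, 32); // Layout::size
    assert_eq!(element_size.bytes() * 3, 12);     // field_offset(3)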
 
 /// Type size "skeleton", i.e. the only information determining a type's size.
@@ -1544,7 +1647,7 @@ impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
         // First try computing a static layout.
         let err = match ty.layout(infcx) {
             Ok(layout) => {
-                return Ok(SizeSkeleton::Known(layout.size(&tcx.data_layout)));
+                return Ok(SizeSkeleton::Known(layout.size(tcx)));
             }
             Err(err) => err
         };
@@ -1658,3 +1761,192 @@ impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
         }
     }
 }
+
+/// A pair of a type and its layout. Implements various
+/// type traversal APIs (e.g. recursing into fields).
+#[derive(Copy, Clone, Debug)]
+pub struct TyLayout<'tcx> {
+    pub ty: Ty<'tcx>,
+    pub layout: &'tcx Layout,
+    pub variant_index: Option<usize>,
+}
+
+impl<'tcx> Deref for TyLayout<'tcx> {
+    type Target = Layout;
+    fn deref(&self) -> &Layout {
+        self.layout
+    }
+}
+
+pub trait HasTyCtxt<'tcx>: HasDataLayout {
+    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
+}
+
+impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.data_layout
+    }
+}
+
+impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
+        self.global_tcx()
+    }
+}
+
+impl<'a, 'gcx, 'tcx> HasDataLayout for &'a InferCtxt<'a, 'gcx, 'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
+        self.tcx.global_tcx()
+    }
+}
+
+pub trait LayoutTyper<'tcx>: HasTyCtxt<'tcx> {
+    type TyLayout;
+
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
+}
+
+impl<'a, 'gcx, 'tcx> LayoutTyper<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
+    type TyLayout = Result<TyLayout<'gcx>, LayoutError<'gcx>>;
+
+    fn layout_of(self, ty: Ty<'gcx>) -> Self::TyLayout {
+        let ty = normalize_associated_type(self, ty);
+
+        Ok(TyLayout {
+            ty: ty,
+            layout: ty.layout(self)?,
+            variant_index: None
+        })
+    }
+}
+
+impl<'a, 'tcx> TyLayout<'tcx> {
+    pub fn for_variant(&self, variant_index: usize) -> Self {
+        TyLayout {
+            variant_index: Some(variant_index),
+            ..*self
+        }
+    }
+
+    pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
+        self.layout.field_offset(cx, i, self.variant_index)
+    }
+
+    pub fn field_count(&self) -> usize {
+        // Handle enum/union through the type rather than Layout.
+        if let ty::TyAdt(def, _) = self.ty.sty {
+            let v = self.variant_index.unwrap_or(0);
+            if def.variants.is_empty() {
+                assert_eq!(v, 0);
+                return 0;
+            } else {
+                return def.variants[v].fields.len();
+            }
+        }
+
+        match *self.layout {
+            Scalar { .. } => {
+                bug!("TyLayout::field_count({:?}): not applicable", self)
+            }
+
+            // Handled above (the TyAdt case).
+            CEnum { .. } |
+            General { .. } |
+            UntaggedUnion { .. } |
+            RawNullablePointer { .. } |
+            StructWrappedNullablePointer { .. } => bug!(),
+
+            FatPointer { .. } => 2,
+
+            Vector { count, .. } |
+            Array { count, .. } => {
+                let usize_count = count as usize;
+                assert_eq!(usize_count as u64, count);
+                usize_count
+            }
+
+            Univariant { ref variant, .. } => variant.offsets.len(),
+        }
+    }
+
+    pub fn field_type<C: HasTyCtxt<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
+        let tcx = cx.tcx();
+
+        let ptr_field_type = |pointee: Ty<'tcx>| {
+            let slice = |element: Ty<'tcx>| {
+                assert!(i < 2);
+                if i == 0 {
+                    tcx.mk_mut_ptr(element)
+                } else {
+                    tcx.types.usize
+                }
+            };
+            match tcx.struct_tail(pointee).sty {
+                ty::TySlice(element) => slice(element),
+                ty::TyStr => slice(tcx.types.u8),
+                ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
+                _ => bug!("TyLayout::field_type({:?}): not applicable", self)
+            }
+        };
+
+        match self.ty.sty {
+            ty::TyBool |
+            ty::TyChar |
+            ty::TyInt(_) |
+            ty::TyUint(_) |
+            ty::TyFloat(_) |
+            ty::TyFnPtr(_) |
+            ty::TyNever |
+            ty::TyFnDef(..) |
+            ty::TyDynamic(..) => {
+                bug!("TyLayout::field_type({:?}): not applicable", self)
+            }
+
+            // Potentially-fat pointers.
+            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
+            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+                ptr_field_type(pointee)
+            }
+            ty::TyAdt(def, _) if def.is_box() => {
+                ptr_field_type(self.ty.boxed_ty())
+            }
+
+            // Arrays and slices.
+            ty::TyArray(element, _) |
+            ty::TySlice(element) => element,
+            ty::TyStr => tcx.types.u8,
+
+            // Tuples and closures.
+            ty::TyClosure(def_id, ref substs) => {
+                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
+            }
+
+            ty::TyTuple(tys, _) => tys[i],
+
+            // SIMD vector types.
+            ty::TyAdt(def, ..) if def.repr.simd => {
+                self.ty.simd_type(tcx)
+            }
+
+            // ADTs.
+            ty::TyAdt(def, substs) => {
+                def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
+            }
+
+            ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
+            ty::TyInfer(_) | ty::TyError => {
+                bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
+            }
+        }
+    }
+
+    pub fn field<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> C::TyLayout {
+        cx.layout_of(self.field_type(cx, i))
+    }
+}
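`TyLayout` ties a `Ty` to its `Layout` so that `field_count`, `field_offset` and `field` compose into generic traversals. A minimal sketch of walking a struct's immediate fields through an `&InferCtxt` (the function is hypothetical, and assumes a type for which `field_count` is applicable, e.g. a struct):

    fn print_fields<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
                                    ty: Ty<'gcx>)
                                    -> Result<(), LayoutError<'gcx>> {
        let layout = infcx.layout_of(ty)?;
        for i in 0..layout.field_count() {
            // For &InferCtxt, LayoutTyper::TyLayout is a Result, hence the `?`.
            let field = layout.field(infcx, i)?;
            debug!("field {}: {} at offset {}",
                   i, field.ty, layout.field_offset(infcx, i).bytes());
        }
        Ok(())
    }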
diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs
index 529afe0215e53..2318bb81affe6 100644
--- a/src/librustc_lint/types.rs
+++ b/src/librustc_lint/types.rs
@@ -733,7 +733,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
             });
 
             if let Layout::General { ref variants, ref size, discr, .. } = *layout {
-                let discr_size = Primitive::Int(discr).size(&cx.tcx.data_layout).bytes();
+                let discr_size = Primitive::Int(discr).size(cx.tcx).bytes();
 
                 debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
                        t, size.bytes(), layout);
diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index 453f65eb762f8..7be80a757ca01 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
+use llvm::{self, ValueRef, AttributePlace};
 use base;
 use builder::Builder;
 use common::{type_is_fat_ptr, C_uint};
@@ -29,19 +29,20 @@ use cabi_sparc;
 use cabi_sparc64;
 use cabi_nvptx;
 use cabi_nvptx64;
-use machine::{llalign_of_min, llsize_of, llsize_of_alloc};
+use machine::llalign_of_min;
 use type_::Type;
 use type_of;
 
 use rustc::hir;
 use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size};
 
 use libc::c_uint;
 use std::cmp;
+use std::iter;
 
 pub use syntax::abi::Abi;
 pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
-use rustc::ty::layout::Layout;
 
 #[derive(Clone, Copy, PartialEq, Debug)]
 enum ArgKind {
@@ -132,33 +133,293 @@ impl ArgAttributes {
         }
     }
 }
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum RegKind {
+    Integer,
+    Float,
+    Vector
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Reg {
+    pub kind: RegKind,
+    pub size: Size,
+}
reg_ctor { + ($name:ident, $kind:ident, $bits:expr) => { + pub fn $name() -> Reg { + Reg { + kind: RegKind::$kind, + size: Size::from_bits($bits) + } + } + } +} + +impl Reg { + reg_ctor!(i8, Integer, 8); + reg_ctor!(i16, Integer, 16); + reg_ctor!(i32, Integer, 32); + reg_ctor!(i64, Integer, 64); + + reg_ctor!(f32, Float, 32); + reg_ctor!(f64, Float, 64); +} + +impl Reg { + fn llvm_type(&self, ccx: &CrateContext) -> Type { + match self.kind { + RegKind::Integer => Type::ix(ccx, self.size.bits()), + RegKind::Float => { + match self.size.bits() { + 32 => Type::f32(ccx), + 64 => Type::f64(ccx), + _ => bug!("unsupported float: {:?}", self) + } + } + RegKind::Vector => { + Type::vector(&Type::i8(ccx), self.size.bytes()) + } + } + } +} + +/// An argument passed entirely registers with the +/// same kind (e.g. HFA / HVA on PPC64 and AArch64). +#[derive(Copy, Clone)] +pub struct Uniform { + pub unit: Reg, + + /// The total size of the argument, which can be: + /// * equal to `unit.size` (one scalar/vector) + /// * a multiple of `unit.size` (an array of scalar/vectors) + /// * if `unit.kind` is `Integer`, the last element + /// can be shorter, i.e. `{ i64, i64, i32 }` for + /// 64-bit integers with a total size of 20 bytes + pub total: Size, +} + +impl From for Uniform { + fn from(unit: Reg) -> Uniform { + Uniform { + unit, + total: unit.size + } + } +} + +impl Uniform { + fn llvm_type(&self, ccx: &CrateContext) -> Type { + let llunit = self.unit.llvm_type(ccx); + + if self.total <= self.unit.size { + return llunit; + } + + let count = self.total.bytes() / self.unit.size.bytes(); + let rem_bytes = self.total.bytes() % self.unit.size.bytes(); + + if rem_bytes == 0 { + return Type::array(&llunit, count); + } + + // Only integers can be really split further. + assert_eq!(self.unit.kind, RegKind::Integer); + + let args: Vec<_> = (0..count).map(|_| llunit) + .chain(iter::once(Type::ix(ccx, rem_bytes * 8))) + .collect(); + + Type::struct_(ccx, &args, false) + } +} + +pub trait LayoutExt<'tcx> { + fn is_aggregate(&self) -> bool; + fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option; +} + +impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { + fn is_aggregate(&self) -> bool { + match *self.layout { + Layout::Scalar { .. } | + Layout::RawNullablePointer { .. } | + Layout::CEnum { .. } | + Layout::Vector { .. } => false, + + Layout::Array { .. } | + Layout::FatPointer { .. } | + Layout::Univariant { .. } | + Layout::UntaggedUnion { .. } | + Layout::General { .. } | + Layout::StructWrappedNullablePointer { .. } => true + } + } + + fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option { + match *self.layout { + // The primitives for this algorithm. + Layout::Scalar { value, .. } | + Layout::RawNullablePointer { value, .. } => { + let kind = match value { + layout::Int(_) | + layout::Pointer => RegKind::Integer, + layout::F32 | + layout::F64 => RegKind::Float + }; + Some(Reg { + kind, + size: self.size(ccx) + }) + } + + Layout::CEnum { .. } => { + Some(Reg { + kind: RegKind::Integer, + size: self.size(ccx) + }) + } + + Layout::Vector { .. } => { + Some(Reg { + kind: RegKind::Integer, + size: self.size(ccx) + }) + } + + Layout::Array { count, .. } => { + if count > 0 { + self.field(ccx, 0).homogenous_aggregate(ccx) + } else { + None + } + } + + Layout::Univariant { ref variant, .. 
+
+pub trait LayoutExt<'tcx> {
+    fn is_aggregate(&self) -> bool;
+    fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
+}
+
+impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
+    fn is_aggregate(&self) -> bool {
+        match *self.layout {
+            Layout::Scalar { .. } |
+            Layout::RawNullablePointer { .. } |
+            Layout::CEnum { .. } |
+            Layout::Vector { .. } => false,
+
+            Layout::Array { .. } |
+            Layout::FatPointer { .. } |
+            Layout::Univariant { .. } |
+            Layout::UntaggedUnion { .. } |
+            Layout::General { .. } |
+            Layout::StructWrappedNullablePointer { .. } => true
+        }
+    }
+
+    fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
+        match *self.layout {
+            // The primitives for this algorithm.
+            Layout::Scalar { value, .. } |
+            Layout::RawNullablePointer { value, .. } => {
+                let kind = match value {
+                    layout::Int(_) |
+                    layout::Pointer => RegKind::Integer,
+                    layout::F32 |
+                    layout::F64 => RegKind::Float
+                };
+                Some(Reg {
+                    kind,
+                    size: self.size(ccx)
+                })
+            }
+
+            Layout::CEnum { .. } => {
+                Some(Reg {
+                    kind: RegKind::Integer,
+                    size: self.size(ccx)
+                })
+            }
+
+            Layout::Vector { .. } => {
+                Some(Reg {
+                    kind: RegKind::Vector,
+                    size: self.size(ccx)
+                })
+            }
+
+            Layout::Array { count, .. } => {
+                if count > 0 {
+                    self.field(ccx, 0).homogenous_aggregate(ccx)
+                } else {
+                    None
+                }
+            }
+
+            Layout::Univariant { ref variant, .. } => {
+                let mut unaligned_offset = Size::from_bytes(0);
+                let mut result = None;
+
+                for i in 0..self.field_count() {
+                    if unaligned_offset != variant.offsets[i] {
+                        return None;
+                    }
+
+                    let field = self.field(ccx, i);
+                    match (result, field.homogenous_aggregate(ccx)) {
+                        // The field itself must be a homogenous aggregate.
+                        (_, None) => return None,
+                        // If this is the first field, record the unit.
+                        (None, Some(unit)) => {
+                            result = Some(unit);
+                        }
+                        // For all following fields, the unit must be the same.
+                        (Some(prev_unit), Some(unit)) => {
+                            if prev_unit != unit {
+                                return None;
+                            }
+                        }
+                    }
+
+                    // Keep track of the offset (without padding).
+                    let size = field.size(ccx);
+                    match unaligned_offset.checked_add(size, ccx) {
+                        Some(offset) => unaligned_offset = offset,
+                        None => return None
+                    }
+                }
+
+                // There needs to be no padding.
+                if unaligned_offset != self.size(ccx) {
+                    None
+                } else {
+                    result
+                }
+            }
+
+            Layout::UntaggedUnion { .. } => {
+                let mut max = Size::from_bytes(0);
+                let mut result = None;
+
+                for i in 0..self.field_count() {
+                    let field = self.field(ccx, i);
+                    match (result, field.homogenous_aggregate(ccx)) {
+                        // The field itself must be a homogenous aggregate.
+                        (_, None) => return None,
+                        // If this is the first field, record the unit.
+                        (None, Some(unit)) => {
+                            result = Some(unit);
+                        }
+                        // For all following fields, the unit must be the same.
+                        (Some(prev_unit), Some(unit)) => {
+                            if prev_unit != unit {
+                                return None;
+                            }
+                        }
+                    }
+
+                    // Keep track of the largest field (without padding).
+                    let size = field.size(ccx);
+                    if size > max {
+                        max = size;
+                    }
+                }
+
+                // There needs to be no padding.
+                if max != self.size(ccx) {
+                    None
+                } else {
+                    result
+                }
+            }
+
+            // Rust-specific types, which we can ignore for C ABIs.
+            Layout::FatPointer { .. } |
+            Layout::General { .. } |
+            Layout::StructWrappedNullablePointer { .. } => None
+        }
+    }
+}
+
+pub enum CastTarget {
+    Uniform(Uniform),
+    Pair(Reg, Reg)
+}
+
+impl From<Reg> for CastTarget {
+    fn from(unit: Reg) -> CastTarget {
+        CastTarget::Uniform(Uniform::from(unit))
+    }
+}
+
+impl From<Uniform> for CastTarget {
+    fn from(uniform: Uniform) -> CastTarget {
+        CastTarget::Uniform(uniform)
+    }
+}
+
+impl CastTarget {
+    fn llvm_type(&self, ccx: &CrateContext) -> Type {
+        match *self {
+            CastTarget::Uniform(u) => u.llvm_type(ccx),
+            CastTarget::Pair(a, b) => {
+                Type::struct_(ccx, &[
+                    a.llvm_type(ccx),
+                    b.llvm_type(ccx)
+                ], false)
+            }
+        }
+    }
+}
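Together, `homogenous_aggregate` and `CastTarget` replace the old LLVM-type walking: a padding-free struct of three `f32` fields, say, should classify as a 4-byte float unit, which a target can then pass as `[3 x float]`. A sketch of that expected result (the struct itself is hypothetical, and assumed `#[repr(C)]` with no padding):

    // For struct Rgb(f32, f32, f32): homogenous_aggregate -> Some(Reg::f32()),
    // and the argument can be described as a uniform cast target:
    let hfa = CastTarget::Uniform(Uniform {
        unit: Reg::f32(),
        total: Size::from_bytes(12) // three 4-byte units, no padding
    });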
 
 /// Information about how a specific C type
 /// should be passed to or returned from a function
 ///
 /// This is borrowed from clang's ABIInfo.h
 #[derive(Clone, Copy, Debug)]
-pub struct ArgType {
+pub struct ArgType<'tcx> {
     kind: ArgKind,
-    /// Original LLVM type
-    pub original_ty: Type,
-    /// Sizing LLVM type (pointers are opaque).
-    /// Unlike original_ty, this is guaranteed to be complete.
-    ///
-    /// For example, while we're computing the function pointer type in
-    /// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`.
-    /// The field type will likely end up being `void(%Foo)*`, but we cannot
-    /// use `%Foo` to compute properties (e.g. size and alignment) of `Foo`,
-    /// until `%Foo` is completed by having all of its field types inserted,
-    /// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers
-    /// with opaque ones, resulting in `{i8*}` for `Foo`.
-    /// ABI-specific logic can then look at the size, alignment and fields of
-    /// `{i8*}` in order to determine how the argument will be passed.
-    /// Only later will `original_ty` aka `%Foo` be used in the LLVM function
-    /// pointer type, without ever having introspected it.
-    pub ty: Type,
-    /// Signedness for integer types, None for other types
-    pub signedness: Option<bool>,
+    pub layout: TyLayout<'tcx>,
     /// Coerced LLVM Type
     pub cast: Option<Type>,
     /// Dummy argument, which is emitted before the real argument
@@ -167,26 +428,24 @@ pub struct ArgType {
     pub attrs: ArgAttributes
 }
 
-impl ArgType {
-    fn new(original_ty: Type, ty: Type) -> ArgType {
+impl<'a, 'tcx> ArgType<'tcx> {
+    fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
         ArgType {
             kind: ArgKind::Direct,
-            original_ty: original_ty,
-            ty: ty,
-            signedness: None,
+            layout: layout,
             cast: None,
             pad: None,
             attrs: ArgAttributes::default()
         }
     }
 
-    pub fn make_indirect(&mut self, ccx: &CrateContext) {
+    pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) {
         assert_eq!(self.kind, ArgKind::Direct);
 
         // Wipe old attributes, likely not valid through indirection.
         self.attrs = ArgAttributes::default();
 
-        let llarg_sz = llsize_of_alloc(ccx, self.ty);
+        let llarg_sz = self.layout.size(ccx).bytes();
 
         // For non-immediate arguments the callee gets its own copy of
         // the value on the stack, so there are no aliases. It's also
@@ -205,17 +464,44 @@
 
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let Some(signed) = self.signedness {
-            if self.ty.int_width() < bits {
-                self.attrs.set(if signed {
-                    ArgAttribute::SExt
-                } else {
-                    ArgAttribute::ZExt
-                });
+        let (i, signed) = match *self.layout {
+            Layout::Scalar { value, .. } => {
+                match value {
+                    layout::Int(i) => {
+                        if self.layout.ty.is_integral() {
+                            (i, self.layout.ty.is_signed())
+                        } else {
+                            return;
+                        }
+                    }
+                    _ => return
+                }
             }
+
+            // Rust enum types that map onto C enums also need to follow
+            // the target ABI zero-/sign-extension rules.
+            Layout::CEnum { discr, signed, .. } => (discr, signed),
+
+            _ => return
+        };
+
+        if i.size().bits() < bits {
+            self.attrs.set(if signed {
+                ArgAttribute::SExt
+            } else {
+                ArgAttribute::ZExt
+            });
         }
     }
 
+    pub fn cast_to<T: Into<CastTarget>>(&mut self, ccx: &CrateContext, target: T) {
+        self.cast = Some(target.into().llvm_type(ccx));
+    }
+
+    pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) {
+        self.pad = Some(reg.llvm_type(ccx));
+    }
+
     pub fn is_indirect(&self) -> bool {
         self.kind == ArgKind::Indirect
     }
@@ -224,18 +510,24 @@
         self.kind == ArgKind::Ignore
     }
 
+    /// Get the LLVM type for an lvalue of the original Rust type of
+    /// this argument/return, i.e. the result of `type_of::type_of`.
+    pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
+        type_of::type_of(ccx, self.layout.ty)
+    }
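`extend_integer_width_to` is the hook the per-target `cabi_*` modules call for small scalars; a sketch of typical use, assuming a C ABI that widens integer arguments to 32 bits:

    // For fn f(x: i8, y: u8): x gets SExt, y gets ZExt; wider types and
    // non-integers are left untouched (the method returns early for them).
    for arg in &mut fn_ty.args {
        arg.extend_integer_width_to(32);
    }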
 
     /// Store a direct/indirect value described by this ArgType into a
     /// lvalue for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    pub fn store(&self, bcx: &Builder, mut val: ValueRef, dst: ValueRef) {
+    pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) {
         if self.is_ignore() {
             return;
         }
         let ccx = bcx.ccx;
         if self.is_indirect() {
-            let llsz = llsize_of(ccx, self.ty);
-            let llalign = llalign_of_min(ccx, self.ty);
+            let llsz = C_uint(ccx, self.layout.size(ccx).bytes());
+            let llalign = self.layout.align(ccx).abi();
             base::call_memcpy(bcx, dst, val, llsz, llalign as u32);
         } else if let Some(ty) = self.cast {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
@@ -243,8 +535,8 @@
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
                 let cast_dst = bcx.pointercast(dst, ty.ptr_to());
-                let llalign = llalign_of_min(ccx, self.ty);
-                bcx.store(val, cast_dst, Some(llalign));
+                let llalign = self.layout.align(ccx).abi();
+                bcx.store(val, cast_dst, Some(llalign as u32));
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type.  The
@@ -271,21 +563,21 @@
                 base::call_memcpy(bcx,
                                   bcx.pointercast(dst, Type::i8p(ccx)),
                                   bcx.pointercast(llscratch, Type::i8p(ccx)),
-                                  C_uint(ccx, llsize_of_alloc(ccx, self.ty)),
-                                  cmp::min(llalign_of_min(ccx, self.ty),
-                                           llalign_of_min(ccx, ty)) as u32);
+                                  C_uint(ccx, self.layout.size(ccx).bytes()),
+                                  cmp::min(self.layout.align(ccx).abi() as u32,
+                                           llalign_of_min(ccx, ty)));
 
                 base::Lifetime::End.call(bcx, llscratch);
             }
         } else {
-            if self.original_ty == Type::i1(ccx) {
+            if self.layout.ty == ccx.tcx().types.bool {
                 val = bcx.zext(val, Type::i8(ccx));
             }
             bcx.store(val, dst, None);
         }
     }
 
-    pub fn store_fn_arg(&self, bcx: &Builder, idx: &mut usize, dst: ValueRef) {
+    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) {
         if self.pad.is_some() {
             *idx += 1;
         }
@@ -304,30 +596,30 @@
 /// I will do my best to describe this structure, but these
 /// comments are reverse-engineered and may be inaccurate. -NDM
 #[derive(Clone, Debug)]
-pub struct FnType {
+pub struct FnType<'tcx> {
     /// The LLVM types of each argument.
-    pub args: Vec<ArgType>,
+    pub args: Vec<ArgType<'tcx>>,
 
     /// LLVM return type.
-    pub ret: ArgType,
+    pub ret: ArgType<'tcx>,
 
     pub variadic: bool,
     pub cconv: llvm::CallConv
 }
 
-impl FnType {
-    pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                         sig: ty::FnSig<'tcx>,
-                         extra_args: &[Ty<'tcx>]) -> FnType {
+impl<'a, 'tcx> FnType<'tcx> {
+    pub fn new(ccx: &CrateContext<'a, 'tcx>,
+               sig: ty::FnSig<'tcx>,
+               extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
         fn_ty.adjust_for_abi(ccx, sig);
         fn_ty
     }
 
-    pub fn new_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                sig: ty::FnSig<'tcx>,
-                                extra_args: &[Ty<'tcx>]) -> FnType {
+    pub fn new_vtable(ccx: &CrateContext<'a, 'tcx>,
+                      sig: ty::FnSig<'tcx>,
+                      extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
         // Don't pass the vtable, it's not an argument of the virtual fn.
fn_ty.args[1].ignore(); @@ -335,9 +627,9 @@ impl FnType { fn_ty } - fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - sig: ty::FnSig<'tcx>, - extra_args: &[Ty<'tcx>]) -> FnType { + pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { use self::Abi::*; let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) { RustIntrinsic | PlatformIntrinsic | @@ -394,23 +686,11 @@ impl FnType { }; let arg_of = |ty: Ty<'tcx>, is_return: bool| { + let mut arg = ArgType::new(ccx.layout_of(ty)); if ty.is_bool() { - let llty = Type::i1(ccx); - let mut arg = ArgType::new(llty, llty); arg.attrs.set(ArgAttribute::ZExt); - arg } else { - let mut arg = ArgType::new(type_of::type_of(ccx, ty), - type_of::sizing_type_of(ccx, ty)); - if ty.is_integral() { - arg.signedness = Some(ty.is_signed()); - } - // Rust enum types that map onto C enums also need to follow - // the target ABI zero-/sign-extension rules. - if let Layout::CEnum { signed, .. } = *ccx.layout_of(ty) { - arg.signedness = Some(signed); - } - if llsize_of_alloc(ccx, arg.ty) == 0 { + if arg.layout.size(ccx).bytes() == 0 { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. // The same is true for s390x-unknown-linux-gnu. @@ -419,8 +699,8 @@ impl FnType { arg.ignore(); } } - arg } + arg }; let ret_ty = sig.output(); @@ -439,14 +719,10 @@ impl FnType { match ret_ty.sty { // These are not really pointers but pairs, (pointer, len) ty::TyRef(_, ty::TypeAndMut { ty, .. }) => { - let llty = type_of::sizing_type_of(ccx, ty); - let llsz = llsize_of_alloc(ccx, llty); - ret.attrs.set_dereferenceable(llsz); + ret.attrs.set_dereferenceable(ccx.size_of(ty)); } ty::TyAdt(def, _) if def.is_box() => { - let llty = type_of::sizing_type_of(ccx, ret_ty.boxed_ty()); - let llsz = llsize_of_alloc(ccx, llty); - ret.attrs.set_dereferenceable(llsz); + ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty())); } _ => {} } @@ -495,13 +771,9 @@ impl FnType { for ty in inputs.iter().chain(extra_args.iter()) { let mut arg = arg_of(ty, false); - if type_is_fat_ptr(ccx, ty) { - let original_tys = arg.original_ty.field_types(); - let sizing_tys = arg.ty.field_types(); - assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2)); - - let mut data = ArgType::new(original_tys[0], sizing_tys[0]); - let mut info = ArgType::new(original_tys[1], sizing_tys[1]); + if let ty::layout::FatPointer { .. } = *arg.layout { + let mut data = ArgType::new(arg.layout.field(ccx, 0)); + let mut info = ArgType::new(arg.layout.field(ccx, 1)); if let Some(inner) = rust_ptr_attrs(ty, &mut data) { data.attrs.set(ArgAttribute::NonNull); @@ -517,9 +789,7 @@ impl FnType { args.push(info); } else { if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { - let llty = type_of::sizing_type_of(ccx, inner); - let llsz = llsize_of_alloc(ccx, llty); - arg.attrs.set_dereferenceable(llsz); + arg.attrs.set_dereferenceable(ccx.size_of(inner)); } args.push(arg); } @@ -533,43 +803,51 @@ impl FnType { } } - fn adjust_for_abi<'a, 'tcx>(&mut self, - ccx: &CrateContext<'a, 'tcx>, - sig: ty::FnSig<'tcx>) { + fn adjust_for_abi(&mut self, + ccx: &CrateContext<'a, 'tcx>, + sig: ty::FnSig<'tcx>) { let abi = sig.abi; if abi == Abi::Unadjusted { return } if abi == Abi::Rust || abi == Abi::RustCall || abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { - let fixup = |arg: &mut ArgType| { - let mut llty = arg.ty; - - // Replace newtypes with their inner-most type. 
- while llty.kind() == llvm::TypeKind::Struct { - let inner = llty.field_types(); - if inner.len() != 1 { - break; - } - llty = inner[0]; + let fixup = |arg: &mut ArgType<'tcx>| { + if !arg.layout.is_aggregate() { + return; } - if !llty.is_aggregate() { - // Scalars and vectors, always immediate. - if llty != arg.ty { + let size = arg.layout.size(ccx); + + if let Some(unit) = arg.layout.homogenous_aggregate(ccx) { + // Replace newtypes with their inner-most type. + if unit.size == size { // Needs a cast as we've unpacked a newtype. - arg.cast = Some(llty); + arg.cast_to(ccx, unit); + return; + } + + // Pairs of floats. + if unit.kind == RegKind::Float { + if unit.size.checked_mul(2, ccx) == Some(size) { + // FIXME(eddyb) This should be using Uniform instead of a pair, + // but the resulting [2 x float/double] breaks emscripten. + // See https://github.com/kripken/emscripten-fastcomp/issues/178. + arg.cast_to(ccx, CastTarget::Pair(unit, unit)); + return; + } } - return; } - let size = llsize_of_alloc(ccx, llty); - if size > llsize_of_alloc(ccx, ccx.int_type()) { + if size > layout::Pointer.size(ccx) { arg.make_indirect(ccx); - } else if size > 0 { + } else { // We want to pass small aggregates as immediates, but using // a LLVM aggregate type for this leads to bad optimizations, // so we pick an appropriately sized integer type instead. - arg.cast = Some(Type::ix(ccx, size * 8)); + arg.cast_to(ccx, Reg { + kind: RegKind::Integer, + size + }); } }; // Fat pointers are returned by-value. @@ -605,14 +883,7 @@ impl FnType { cabi_x86_64::compute_abi_info(ccx, self); }, "aarch64" => cabi_aarch64::compute_abi_info(ccx, self), - "arm" => { - let flavor = if ccx.sess().target.target.target_os == "ios" { - cabi_arm::Flavor::Ios - } else { - cabi_arm::Flavor::General - }; - cabi_arm::compute_abi_info(ccx, self, flavor); - }, + "arm" => cabi_arm::compute_abi_info(ccx, self), "mips" => cabi_mips::compute_abi_info(ccx, self), "mips64" => cabi_mips64::compute_abi_info(ccx, self), "powerpc" => cabi_powerpc::compute_abi_info(ccx, self), @@ -633,16 +904,18 @@ impl FnType { } } - pub fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { let mut llargument_tys = Vec::new(); let llreturn_ty = if self.ret.is_ignore() { Type::void(ccx) } else if self.ret.is_indirect() { - llargument_tys.push(self.ret.original_ty.ptr_to()); + llargument_tys.push(self.ret.memory_ty(ccx).ptr_to()); Type::void(ccx) } else { - self.ret.cast.unwrap_or(self.ret.original_ty) + self.ret.cast.unwrap_or_else(|| { + type_of::immediate_type_of(ccx, self.ret.layout.ty) + }) }; for arg in &self.args { @@ -655,9 +928,11 @@ impl FnType { } let llarg_ty = if arg.is_indirect() { - arg.original_ty.ptr_to() + arg.memory_ty(ccx).ptr_to() } else { - arg.cast.unwrap_or(arg.original_ty) + arg.cast.unwrap_or_else(|| { + type_of::immediate_type_of(ccx, arg.layout.ty) + }) }; llargument_tys.push(llarg_ty); @@ -705,72 +980,6 @@ impl FnType { } } -pub fn align_up_to(off: usize, a: usize) -> usize { - return (off + a - 1) / a * a; -} - -fn align(off: usize, ty: Type, pointer: usize) -> usize { - let a = ty_align(ty, pointer); - return align_up_to(off, a); -} - -pub fn ty_align(ty: Type, pointer: usize) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => pointer, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t, pointer))) - } - } - 
-        Array => {
-            let elt = ty.element_type();
-            ty_align(elt, pointer)
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            ty_align(elt, pointer) * len
-        }
-        _ => bug!("ty_align: unhandled type")
-    }
-}
-
-pub fn ty_size(ty: Type, pointer: usize) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => pointer,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t, pointer))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter().fold(0, |s, t| {
-                    align(s, *t, pointer) + ty_size(*t, pointer)
-                });
-                align(size, ty, pointer)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, pointer);
-            len * eltsz
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, pointer);
-            len * eltsz
-        },
-        _ => bug!("ty_size: unhandled type")
-    }
+pub fn align_up_to(off: u64, a: u64) -> u64 {
+    (off + a - 1) / a * a
 }
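`align_up_to` rounds `off` up to the next multiple of `a`, e.g.:

    assert_eq!(align_up_to(20, 8), 24); // (20 + 8 - 1) / 8 * 8
    assert_eq!(align_up_to(24, 8), 24); // already a multiple of 8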
diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs
index 5c1ced573402e..0fe180253b5b8 100644
--- a/src/librustc_trans/adt.rs
+++ b/src/librustc_trans/adt.rs
@@ -46,8 +46,8 @@ use super::Disr;
 use std;
 
 use llvm::{ValueRef, True, IntEQ, IntNE};
-use rustc::ty::layout;
-use rustc::ty::{self, Ty, AdtKind};
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, LayoutTyper};
 use common::*;
 use builder::Builder;
 use base;
@@ -95,15 +95,6 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
     generic_type_of(cx, t, None, false, false)
 }
 
-
-// Pass dst=true if the type you are passing is a DST. Yes, we could figure
-// this out, but if you call this on an unsized type without realising it, you
-// are going to get the wrong type (it will not include the unsized parts of it).
-pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                t: Ty<'tcx>, dst: bool) -> Type {
-    generic_type_of(cx, t, None, true, dst)
-}
-
 pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                     t: Ty<'tcx>, name: &str) -> Type {
     generic_type_of(cx, t, Some(name), false, false)
@@ -149,7 +140,11 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
             };
             let nnty = monomorphize::field_ty(cx.tcx(), substs,
                 &def.variants[nndiscr as usize].fields[0]);
-            type_of::sizing_type_of(cx, nnty)
+            if let layout::Scalar { value: layout::Pointer, .. } = *cx.layout_of(nnty) {
+                Type::i8p(cx)
+            } else {
+                type_of::type_of(cx, nnty)
+            }
         }
         layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
             let fields = compute_fields(cx, t, nndiscr as usize, false);
@@ -181,10 +176,6 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                 }
             }
         }
-        layout::Vector { element, count } => {
-            let elem_ty = Type::from_primitive(cx, element);
-            Type::vector(&elem_ty, count)
-        }
         layout::UntaggedUnion { ref variants, .. }=> {
            // Use alignment-sized ints to fill all the union storage.
            let size = variants.stride().bytes();
@@ -246,9 +237,8 @@ fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type {
     assert_eq!(size%align, 0);
     assert_eq!(align.count_ones(), 1, "Alignment must be a power of 2. Got {}", align);
     let align_units = size/align;
-    let dl = &cx.tcx().data_layout;
     let layout_align = layout::Align::from_bytes(align, align).unwrap();
-    if let Some(ity) = layout::Integer::for_abi_align(dl, layout_align) {
+    if let Some(ity) = layout::Integer::for_abi_align(cx, layout_align) {
         Type::array(&Type::from_integer(cx, ity), align_units)
     } else {
         Type::array(&Type::vector(&Type::i32(cx), align/4),
@@ -259,11 +249,10 @@ fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type {
 
 fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>,
                              variant: &layout::Struct,
-                             sizing: bool, dst: bool) -> Vec<Type> {
+                             sizing: bool, _dst: bool) -> Vec<Type> {
     let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
     if sizing {
-        fields.filter(|ty| !dst || cx.shared().type_is_sized(*ty))
-            .map(|ty| type_of::sizing_type_of(cx, ty)).collect()
+        bug!()
     } else {
         fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
     }
@@ -285,11 +274,6 @@ pub fn trans_get_discr<'a, 'tcx>(
     cast_to: Option<Type>,
     range_assert: bool
 ) -> ValueRef {
-    let (def, substs) = match t.sty {
-        ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs),
-        _ => bug!("{} is not an enum", t)
-    };
-
     debug!("trans_get_discr t: {:?}", t);
     let l = bcx.ccx.layout_of(t);
 
@@ -297,19 +281,17 @@ pub fn trans_get_discr<'a, 'tcx>(
         layout::CEnum { discr, min, max, .. } => {
             load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert)
         }
-        layout::General { discr, .. } => {
+        layout::General { discr, ref variants, .. } => {
             let ptr = bcx.struct_gep(scrutinee, 0);
             load_discr(bcx, discr, ptr, alignment,
-                       0, def.variants.len() as u64 - 1,
+                       0, variants.len() as u64 - 1,
                        range_assert)
         }
         layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
         layout::RawNullablePointer { nndiscr, .. } => {
             let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
-            let llptrty = type_of::sizing_type_of(bcx.ccx,
-                monomorphize::field_ty(bcx.tcx(), substs,
-                    &def.variants[nndiscr as usize].fields[0]));
-            bcx.icmp(cmp, bcx.load(scrutinee, alignment.to_align()), C_null(llptrty))
+            let discr = bcx.load(scrutinee, alignment.to_align());
+            bcx.icmp(cmp, discr, C_null(val_ty(discr)))
         }
         layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
             struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment)
@@ -383,9 +365,8 @@ pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef,
             assert_eq!(to, Disr(0));
         }
         layout::RawNullablePointer { nndiscr, ..
} => { - let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; if to.0 != nndiscr { - let llptrty = type_of::sizing_type_of(bcx.ccx, nnty); + let llptrty = val_ty(val).element_type(); bcx.store(C_null(llptrty), val, None); } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index d204703b77598..574b345218be9 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -59,7 +59,6 @@ use context::{SharedCrateContext, CrateContextList}; use debuginfo; use declare; use machine; -use machine::llsize_of; use meth; use mir; use monomorphize::{self, Instance}; @@ -534,14 +533,13 @@ pub fn memcpy_ty<'a, 'tcx>( ) { let ccx = bcx.ccx; - if type_is_zero_size(ccx, t) { + let size = ccx.size_of(t); + if size == 0 { return; } - let llty = type_of::type_of(ccx, t); - let llsz = llsize_of(ccx, llty); - let llalign = align.unwrap_or_else(|| type_of::align_of(ccx, t)); - call_memcpy(bcx, dst, src, llsz, llalign as u32); + let align = align.unwrap_or_else(|| ccx.align_of(t)); + call_memcpy(bcx, dst, src, C_uint(ccx, size), align); } pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, @@ -1297,8 +1295,8 @@ fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { // (delay format until we actually need it) let record = |kind, opt_discr_size, variants| { let type_desc = format!("{:?}", ty); - let overall_size = layout.size(&tcx.data_layout); - let align = layout.align(&tcx.data_layout); + let overall_size = layout.size(tcx); + let align = layout.align(tcx); tcx.sess.code_stats.borrow_mut().record_type_size(kind, type_desc, align, @@ -1334,8 +1332,8 @@ fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { session::FieldInfo { name: field_name.to_string(), offset: offset.bytes(), - size: field_layout.size(&tcx.data_layout).bytes(), - align: field_layout.align(&tcx.data_layout).abi(), + size: field_layout.size(tcx).bytes(), + align: field_layout.align(tcx).abi(), } } } @@ -1345,8 +1343,8 @@ fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { session::VariantInfo { name: Some(name.to_string()), kind: session::SizeKind::Exact, - align: value.align(&tcx.data_layout).abi(), - size: value.size(&tcx.data_layout).bytes(), + align: value.align(tcx).abi(), + size: value.size(tcx).bytes(), fields: vec![], } }; diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs index 59a84439950ba..c8c5af714d92a 100644 --- a/src/librustc_trans/cabi_aarch64.rs +++ b/src/librustc_trans/cabi_aarch64.rs @@ -8,163 +8,99 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(non_upper_case_globals)]
-
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{self, FnType, ArgType};
+use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 8)
-}
-
-fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
-    fn check_array(ty: Type) -> Option<(Type, u64)> {
-        let len = ty.array_length() as u64;
-        if len == 0 {
-            return None
-        }
-        let elt = ty.element_type();
-
-        // if our element is an HFA/HVA, so are we; multiply members by our len
-        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
-    }
-
-    fn check_struct(ty: Type) -> Option<(Type, u64)> {
-        let str_tys = ty.field_types();
-        if str_tys.len() == 0 {
-            return None
-        }
-
-        let mut prev_base_ty = None;
-        let mut members = 0;
-        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
-            match (prev_base_ty, opt_homog_agg) {
-                // field isn't itself an HFA, so we aren't either
-                (_, None) => return None,
-
-                // first field - store its type and number of members
-                (None, Some((field_ty, field_members))) => {
-                    prev_base_ty = Some(field_ty);
-                    members = field_members;
-                },
-
-                // 2nd or later field - give up if it's a different type; otherwise incr. members
-                (Some(prev_ty), Some((field_ty, field_members))) => {
-                    if prev_ty != field_ty {
-                        return None;
-                    }
-                    members += field_members;
-                }
-            }
-        }
-
-        // Because of previous checks, we know prev_base_ty is Some(...) because
-        //   1. str_tys has at least one element; and
-        //   2. prev_base_ty was filled in (or we would've returned early)
-        let (base_ty, members) = (prev_base_ty.unwrap(), members);
+fn is_homogenous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+                                     -> Option<Uniform> {
+    arg.layout.homogenous_aggregate(ccx).and_then(|unit| {
+        let size = arg.layout.size(ccx);
 
-        // Ensure there is no padding.
-        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
-            Some((base_ty, members))
-        } else {
-            None
+        // Ensure we have at most four uniquely addressable members.
+ if size > unit.size.checked_mul(4, ccx).unwrap() { + return None; } - } - let homog_agg = match ty.kind() { - Float => Some((ty, 1)), - Double => Some((ty, 1)), - Array => check_array(ty), - Struct => check_struct(ty), - Vector => match ty_size(ty) { - 4|8 => Some((ty, 1)), - _ => None - }, - _ => None - }; + let valid_unit = match unit.kind { + RegKind::Integer => false, + RegKind::Float => true, + RegKind::Vector => size.bits() == 64 || size.bits() == 128 + }; - // Ensure we have at most four uniquely addressable members - homog_agg.and_then(|(base_ty, members)| { - if members > 0 && members <= 4 { - Some((base_ty, members)) + if valid_unit { + Some(Uniform { + unit, + total: size + }) } else { None } }) } -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); return; } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) { - ret.cast = Some(Type::array(&base_ty, members)); + if let Some(uniform) = is_homogenous_aggregate(ccx, ret) { + ret.cast_to(ccx, uniform); return; } - let size = ty_size(ret.ty); - if size <= 16 { - let llty = if size <= 1 { - Type::i8(ccx) - } else if size <= 2 { - Type::i16(ccx) - } else if size <= 4 { - Type::i32(ccx) - } else if size <= 8 { - Type::i64(ccx) + let size = ret.layout.size(ccx); + let bits = size.bits(); + if bits <= 128 { + let unit = if bits <= 8 { + Reg::i8() + } else if bits <= 16 { + Reg::i16() + } else if bits <= 32 { + Reg::i32() } else { - Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) + Reg::i64() }; - ret.cast = Some(llty); + + ret.cast_to(ccx, Uniform { + unit, + total: size + }); return; } ret.make_indirect(ccx); } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if is_reg_ty(arg.ty) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if !arg.layout.is_aggregate() { arg.extend_integer_width_to(32); return; } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) { - arg.cast = Some(Type::array(&base_ty, members)); + if let Some(uniform) = is_homogenous_aggregate(ccx, arg) { + arg.cast_to(ccx, uniform); return; } - let size = ty_size(arg.ty); - if size <= 16 { - let llty = if size == 0 { - Type::array(&Type::i64(ccx), 0) - } else if size == 1 { - Type::i8(ccx) - } else if size == 2 { - Type::i16(ccx) - } else if size <= 4 { - Type::i32(ccx) - } else if size <= 8 { - Type::i64(ccx) + let size = arg.layout.size(ccx); + let bits = size.bits(); + if bits <= 128 { + let unit = if bits <= 8 { + Reg::i8() + } else if bits <= 16 { + Reg::i16() + } else if bits <= 32 { + Reg::i32() } else { - Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) + Reg::i64() }; - arg.cast = Some(llty); + + arg.cast_to(ccx, Uniform { + unit, + total: size + }); return; } arg.make_indirect(ccx); } -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - } -} - -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 85b26074bae6d..7a91cad511d6d 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -8,156 +8,53 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; -use abi::{self, align_up_to, FnType, ArgType}; +use abi::{FnType, ArgType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use type_::Type; -use std::cmp; - -pub enum Flavor { - General, - Ios -} - -type TyAlignFn = fn(ty: Type) -> usize; - -fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize { - let a = align_fn(ty); - return align_up_to(off, a); -} - -fn general_ty_align(ty: Type) -> usize { - abi::ty_align(ty, 4) -} - -// For more information see: -// ARMv7 -// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual -// /iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html -// ARMv6 -// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual -// /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html -fn ios_ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => cmp::min(4, ((ty.int_width() as usize) + 7) / 8), - Pointer => 4, - Float => 4, - Double => 4, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ios_ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ios_ty_align(elt) - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - ios_ty_align(elt) * len - } - _ => bug!("ty_align: unhandled type") - } -} - -fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 4, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - let str_tys = ty.field_types(); - str_tys.iter().fold(0, |s, t| s + ty_size(*t, align_fn)) - } else { - let str_tys = ty.field_types(); - let size = str_tys.iter() - .fold(0, |s, t| { - align(s, *t, align_fn) + ty_size(*t, align_fn) - }); - align(size, ty, align_fn) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt, align_fn); - len * eltsz - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt, align_fn); - len * eltsz - } - _ => bug!("ty_size: unhandled type") - } -} - -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); return; } - let size = ty_size(ret.ty, align_fn); - if size <= 4 { - let llty = if size <= 1 { - Type::i8(ccx) - } else if size <= 2 { - Type::i16(ccx) + let size = ret.layout.size(ccx); + let bits = size.bits(); + if bits <= 32 { + let unit = if bits <= 8 { + Reg::i8() + } else if bits <= 16 { + Reg::i16() } else { - Type::i32(ccx) + Reg::i32() }; - ret.cast = Some(llty); + ret.cast_to(ccx, Uniform { + unit, + total: size + }); return; } ret.make_indirect(ccx); } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, align_fn: TyAlignFn) { - if is_reg_ty(arg.ty) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if !arg.layout.is_aggregate() { arg.extend_integer_width_to(32); return; } - let align = align_fn(arg.ty); - let size = ty_size(arg.ty, align_fn); - let llty = if align <= 4 { - Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64) - } else { - Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64) - }; - arg.cast = Some(llty); + let align = 
arg.layout.align(ccx).abi(); + let total = arg.layout.size(ccx); + arg.cast_to(ccx, Uniform { + unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, + total + }); } -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - } -} - -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { - let align_fn = match flavor { - Flavor::General => general_ty_align as TyAlignFn, - Flavor::Ios => ios_ty_align as TyAlignFn, - }; - +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret, align_fn); + classify_ret_ty(ccx, &mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg, align_fn); + classify_arg_ty(ccx, arg); } } diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs index f410627400c34..f05dda8bce21a 100644 --- a/src/librustc_trans/cabi_asmjs.rs +++ b/src/librustc_trans/cabi_asmjs.rs @@ -8,10 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_upper_case_globals)] - -use llvm::{Struct, Array}; -use abi::{FnType, ArgType, ArgAttribute}; +use abi::{FnType, ArgType, ArgAttribute, LayoutExt, Uniform}; use context::CrateContext; // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128 @@ -19,31 +16,31 @@ use context::CrateContext; // See the https://github.com/kripken/emscripten-fastcomp-clang repository. // The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions. -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - match ret.ty.kind() { - Struct => { - let field_types = ret.ty.field_types(); - if field_types.len() == 1 { - ret.cast = Some(field_types[0]); - } else { - ret.make_indirect(ccx); +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if ret.layout.is_aggregate() { + if let Some(unit) = ret.layout.homogenous_aggregate(ccx) { + let size = ret.layout.size(ccx); + if unit.size == size { + ret.cast_to(ccx, Uniform { + unit, + total: size + }); + return; } } - Array => { - ret.make_indirect(ccx); - } - _ => {} + + ret.make_indirect(ccx); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if arg.ty.is_aggregate() { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if arg.layout.is_aggregate() { arg.make_indirect(ccx); arg.attrs.set(ArgAttribute::ByVal); } } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs index 25fe53e7ef40f..b7b60859d4a04 100644 --- a/src/librustc_trans/cabi_mips.rs +++ b/src/librustc_trans/cabi_mips.rs @@ -8,94 +8,40 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(non_upper_case_globals)] - -use libc::c_uint; use std::cmp; -use llvm; -use llvm::{Integer, Pointer, Float, Double, Vector}; -use abi::{self, align_up_to, ArgType, FnType}; +use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use type_::Type; - -fn ty_align(ty: Type) -> usize { - abi::ty_align(ty, 4) -} -fn ty_size(ty: Type) -> usize { - abi::ty_size(ty, 4) -} - -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { - let orig_offset = *offset; - let size = ty_size(arg.ty) * 8; - let mut align = ty_align(arg.ty); - +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { + let size = arg.layout.size(ccx); + let mut align = arg.layout.align(ccx).abi(); align = cmp::min(cmp::max(align, 4), 8); - *offset = align_up_to(*offset, align); - *offset += align_up_to(size, align * 8) / 8; - if !is_reg_ty(arg.ty) { - arg.cast = Some(struct_ty(ccx, arg.ty)); - arg.pad = padding_ty(ccx, align, orig_offset); + if arg.layout.is_aggregate() { + arg.cast_to(ccx, Uniform { + unit: Reg::i32(), + total: size + }); + if ((align - 1) & *offset) > 0 { + arg.pad_with(ccx, Reg::i32()); + } } else { arg.extend_integer_width_to(32); } -} - -fn is_reg_ty(ty: Type) -> bool { - return match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - }; -} - -fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { - if ((align - 1 ) & offset) > 0 { - Some(Type::i32(ccx)) - } else { - None - } -} - -fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { - let int_ty = Type::i32(ccx); - let mut args = Vec::new(); - - let mut n = size / 32; - while n > 0 { - args.push(int_ty); - n -= 1; - } - let r = size % 32; - if r > 0 { - unsafe { - args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); - } - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_int(ccx, size), false) + *offset = align_up_to(*offset, align); + *offset += align_up_to(size.bytes(), align); } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs index e6b500c88dc7a..dff75e628de10 100644 --- a/src/librustc_trans/cabi_mips64.rs +++ b/src/librustc_trans/cabi_mips64.rs @@ -8,94 +8,40 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(non_upper_case_globals)] - -use libc::c_uint; use std::cmp; -use llvm; -use llvm::{Integer, Pointer, Float, Double, Vector}; -use abi::{self, align_up_to, ArgType, FnType}; +use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use type_::Type; - -fn ty_align(ty: Type) -> usize { - abi::ty_align(ty, 8) -} -fn ty_size(ty: Type) -> usize { - abi::ty_size(ty, 8) -} - -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); } else { ret.make_indirect(ccx); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { - let orig_offset = *offset; - let size = ty_size(arg.ty) * 8; - let mut align = ty_align(arg.ty); - +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { + let size = arg.layout.size(ccx); + let mut align = arg.layout.align(ccx).abi(); align = cmp::min(cmp::max(align, 4), 8); - *offset = align_up_to(*offset, align); - *offset += align_up_to(size, align * 8) / 8; - if !is_reg_ty(arg.ty) { - arg.cast = Some(struct_ty(ccx, arg.ty)); - arg.pad = padding_ty(ccx, align, orig_offset); + if arg.layout.is_aggregate() { + arg.cast_to(ccx, Uniform { + unit: Reg::i64(), + total: size + }); + if ((align - 1) & *offset) > 0 { + arg.pad_with(ccx, Reg::i64()); + } } else { arg.extend_integer_width_to(64); } -} - -fn is_reg_ty(ty: Type) -> bool { - return match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - }; -} - -fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { - if ((align - 1 ) & offset) > 0 { - Some(Type::i64(ccx)) - } else { - None - } -} - -fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { - let int_ty = Type::i64(ccx); - let mut args = Vec::new(); - - let mut n = size / 64; - while n > 0 { - args.push(int_ty); - n -= 1; - } - let r = size % 64; - if r > 0 { - unsafe { - args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); - } - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_int(ccx, size), false) + *offset = align_up_to(*offset, align); + *offset += align_up_to(size.bytes(), align); } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_msp430.rs b/src/librustc_trans/cabi_msp430.rs index aa90bb7ab753a..546bb5ad9b44e 100644 --- a/src/librustc_trans/cabi_msp430.rs +++ b/src/librustc_trans/cabi_msp430.rs @@ -11,17 +11,8 @@ // Reference: MSP430 Embedded Application Binary Interface // http://www.ti.com/lit/an/slaa534/slaa534.pdf -#![allow(non_upper_case_globals)] - -use llvm::Struct; - -use abi::{self, ArgType, FnType}; +use abi::{ArgType, FnType, LayoutExt}; use context::CrateContext; -use type_::Type; - -fn ty_size(ty: Type) -> usize { - abi::ty_size(ty, 2) -} // 3.5 Structures or Unions Passed and Returned by Reference // @@ -29,23 +20,23 @@ fn ty_size(ty: Type) -> usize { // returned by reference. To pass a structure or union by reference, the caller // places its address in the appropriate location: either in a register or on // the stack, according to its position in the argument list. 
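The msp430 handlers below (and the nvptx ones after them) share a single rule: an aggregate above the register-size threshold goes indirect, and everything else is extended to the native integer width. A hedged standalone sketch of that decision; the function name is invented for illustration and is not part of the `abi` API:

    /// Whether a value is passed by reference under the "small target"
    /// rule used below (threshold given in bits).
    fn passed_indirectly(is_aggregate: bool, size_bits: u64, threshold_bits: u64) -> bool {
        is_aggregate && size_bits > threshold_bits
    }

    fn main() {
        assert!(passed_indirectly(true, 64, 32));   // oversized struct: indirect
        assert!(!passed_indirectly(true, 32, 32));  // fits: cast/extended instead
        assert!(!passed_indirectly(false, 64, 32)); // scalars never take this path
    }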
(..)" -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if ret.ty.kind() == Struct && ty_size(ret.ty) > 32 { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 { ret.make_indirect(ccx); } else { ret.extend_integer_width_to(16); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if arg.ty.kind() == Struct && ty_size(arg.ty) > 32 { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 { arg.make_indirect(ccx); } else { arg.extend_integer_width_to(16); } } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_nvptx.rs b/src/librustc_trans/cabi_nvptx.rs index 5ece19f764a8a..3873752b25470 100644 --- a/src/librustc_trans/cabi_nvptx.rs +++ b/src/librustc_trans/cabi_nvptx.rs @@ -11,35 +11,26 @@ // Reference: PTX Writer's Guide to Interoperability // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability -#![allow(non_upper_case_globals)] - -use llvm::Struct; - -use abi::{self, ArgType, FnType}; +use abi::{ArgType, FnType, LayoutExt}; use context::CrateContext; -use type_::Type; - -fn ty_size(ty: Type) -> usize { - abi::ty_size(ty, 4) -} -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if ret.ty.kind() == Struct && ty_size(ret.ty) > 32 { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 { ret.make_indirect(ccx); } else { ret.extend_integer_width_to(32); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if arg.ty.kind() == Struct && ty_size(arg.ty) > 32 { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 { arg.make_indirect(ccx); } else { arg.extend_integer_width_to(32); } } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_nvptx64.rs b/src/librustc_trans/cabi_nvptx64.rs index 880c6cfd7a8ac..24bf4920c16c1 100644 --- a/src/librustc_trans/cabi_nvptx64.rs +++ b/src/librustc_trans/cabi_nvptx64.rs @@ -11,35 +11,26 @@ // Reference: PTX Writer's Guide to Interoperability // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability -#![allow(non_upper_case_globals)] - -use llvm::Struct; - -use abi::{self, ArgType, FnType}; +use abi::{ArgType, FnType, LayoutExt}; use context::CrateContext; -use type_::Type; - -fn ty_size(ty: Type) -> usize { - abi::ty_size(ty, 8) -} -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if ret.ty.kind() == Struct && ty_size(ret.ty) > 64 { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 { ret.make_indirect(ccx); } else { ret.extend_integer_width_to(64); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if arg.ty.kind() == Struct && ty_size(arg.ty) > 64 { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if 
arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 { arg.make_indirect(ccx); } else { arg.extend_integer_width_to(64); } } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs index 4e1d7a9337827..f951ac76391f6 100644 --- a/src/librustc_trans/cabi_powerpc.rs +++ b/src/librustc_trans/cabi_powerpc.rs @@ -8,100 +8,41 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use libc::c_uint; -use llvm; -use llvm::{Integer, Pointer, Float, Double, Vector}; -use abi::{self, align_up_to, FnType, ArgType}; +use abi::{align_up_to, FnType, ArgType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use type_::Type; use std::cmp; -fn ty_align(ty: Type) -> usize { - if ty.kind() == Vector { - bug!("ty_size: unhandled type") - } else { - abi::ty_align(ty, 4) - } -} - -fn ty_size(ty: Type) -> usize { - if ty.kind() == Vector { - bug!("ty_size: unhandled type") - } else { - abi::ty_size(ty, 4) - } -} - -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { - let orig_offset = *offset; - let size = ty_size(arg.ty) * 8; - let mut align = ty_align(arg.ty); - +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { + let size = arg.layout.size(ccx); + let mut align = arg.layout.align(ccx).abi(); align = cmp::min(cmp::max(align, 4), 8); - *offset = align_up_to(*offset, align); - *offset += align_up_to(size, align * 8) / 8; - if !is_reg_ty(arg.ty) { - arg.cast = Some(struct_ty(ccx, arg.ty)); - arg.pad = padding_ty(ccx, align, orig_offset); + if arg.layout.is_aggregate() { + arg.cast_to(ccx, Uniform { + unit: Reg::i32(), + total: size + }); + if ((align - 1) & *offset) > 0 { + arg.pad_with(ccx, Reg::i32()); + } } else { arg.extend_integer_width_to(32); } -} - -fn is_reg_ty(ty: Type) -> bool { - return match ty.kind() { - Integer - | Pointer - | Float - | Double => true, - _ => false - }; -} -fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { - if ((align - 1 ) & offset) > 0 { - Some(Type::i32(ccx)) - } else { - None - } -} - -fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { - let int_ty = Type::i32(ccx); - let mut args = Vec::new(); - - let mut n = size / 32; - while n > 0 { - args.push(int_ty); - n -= 1; - } - - let r = size % 32; - if r > 0 { - unsafe { - args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); - } - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_int(ccx, size), false) + *offset = align_up_to(*offset, align); + *offset += align_up_to(size.bytes(), align); } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs index cdc7c1fd1afb3..c4f8d0b4b9637 100644 --- 
a/src/librustc_trans/cabi_powerpc64.rs +++ b/src/librustc_trans/cabi_powerpc64.rs @@ -8,100 +8,42 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// FIXME: The PowerPC64 ABI needs to zero or sign extend function -// call parameters, but compute_abi_info() is passed LLVM types -// which have no sign information. -// +// FIXME: // Alignment of 128 bit types is not currently handled, this will // need to be fixed when PowerPC vector support is added. -use llvm::{Integer, Pointer, Float, Double, Struct, Vector, Array}; -use abi::{self, FnType, ArgType}; +use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform}; use context::CrateContext; -use type_::Type; - -fn ty_size(ty: Type) -> usize { - if ty.kind() == Vector { - bug!("ty_size: unhandled type") - } else { - abi::ty_size(ty, 8) - } -} - -fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { - fn check_array(ty: Type) -> Option<(Type, u64)> { - let len = ty.array_length() as u64; - if len == 0 { - return None - } - let elt = ty.element_type(); - - // if our element is an HFA/HVA, so are we; multiply members by our len - is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members)) - } - fn check_struct(ty: Type) -> Option<(Type, u64)> { - let str_tys = ty.field_types(); - if str_tys.len() == 0 { - return None - } +fn is_homogenous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) + -> Option { + arg.layout.homogenous_aggregate(ccx).and_then(|unit| { + let size = arg.layout.size(ccx); - let mut prev_base_ty = None; - let mut members = 0; - for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) { - match (prev_base_ty, opt_homog_agg) { - // field isn't itself an HFA, so we aren't either - (_, None) => return None, - - // first field - store its type and number of members - (None, Some((field_ty, field_members))) => { - prev_base_ty = Some(field_ty); - members = field_members; - }, - - // 2nd or later field - give up if it's a different type; otherwise incr. members - (Some(prev_ty), Some((field_ty, field_members))) => { - if prev_ty != field_ty { - return None; - } - members += field_members; - } - } + // Ensure we have at most eight uniquely addressable members. + if size > unit.size.checked_mul(8, ccx).unwrap() { + return None; } - // Because of previous checks, we know prev_base_ty is Some(...) because - // 1. str_tys has at least one element; and - // 2. prev_base_ty was filled in (or we would've returned early) - let (base_ty, members) = (prev_base_ty.unwrap(), members); - - // Ensure there is no padding. 
- if ty_size(ty) == ty_size(base_ty) * (members as usize) { - Some((base_ty, members)) - } else { - None - } - } + let valid_unit = match unit.kind { + RegKind::Integer => false, + RegKind::Float => true, + RegKind::Vector => size.bits() == 128 + }; - let homog_agg = match ty.kind() { - Float => Some((ty, 1)), - Double => Some((ty, 1)), - Array => check_array(ty), - Struct => check_struct(ty), - _ => None - }; - - // Ensure we have at most eight uniquely addressable members - homog_agg.and_then(|(base_ty, members)| { - if members > 0 && members <= 8 { - Some((base_ty, members)) + if valid_unit { + Some(Uniform { + unit, + total: size + }) } else { None } }) } -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); return; } @@ -111,78 +53,52 @@ fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { ret.make_indirect(ccx); } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) { - ret.cast = Some(Type::array(&base_ty, members)); + if let Some(uniform) = is_homogenous_aggregate(ccx, ret) { + ret.cast_to(ccx, uniform); return; } - let size = ty_size(ret.ty); - if size <= 16 { - let llty = if size <= 1 { - Type::i8(ccx) - } else if size <= 2 { - Type::i16(ccx) - } else if size <= 4 { - Type::i32(ccx) - } else if size <= 8 { - Type::i64(ccx) + let size = ret.layout.size(ccx); + let bits = size.bits(); + if bits <= 128 { + let unit = if bits <= 8 { + Reg::i8() + } else if bits <= 16 { + Reg::i16() + } else if bits <= 32 { + Reg::i32() } else { - Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) + Reg::i64() }; - ret.cast = Some(llty); + + ret.cast_to(ccx, Uniform { + unit, + total: size + }); return; } ret.make_indirect(ccx); } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if is_reg_ty(arg.ty) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if !arg.layout.is_aggregate() { arg.extend_integer_width_to(64); return; } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) { - arg.cast = Some(Type::array(&base_ty, members)); + if let Some(uniform) = is_homogenous_aggregate(ccx, arg) { + arg.cast_to(ccx, uniform); return; } - arg.cast = Some(struct_ty(ccx, arg.ty)); -} - -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | Pointer - | Float - | Double => true, - _ => false - } -} - -fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec { - let long_ty = Type::i64(ccx); - let mut args = Vec::new(); - - let mut n = size / 64; - while n > 0 { - args.push(long_ty); - n -= 1; - } - - let r = size % 64; - if r > 0 { - args.push(Type::ix(ccx, r as u64)); - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_long(ccx, size), false) + let total = arg.layout.size(ccx); + arg.cast_to(ccx, Uniform { + unit: Reg::i64(), + total + }); } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index 5a666c6083d16..fedebea3f4c99 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -11,130 +11,60 @@ // FIXME: The assumes we're using the non-vector ABI, i.e. 
compiling // for a pre-z13 machine or using -mno-vx. -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; -use abi::{align_up_to, FnType, ArgType}; +use abi::{FnType, ArgType, LayoutExt, Reg}; use context::CrateContext; -use type_::Type; -use std::cmp; +use rustc::ty::layout::{self, Layout, TyLayout}; -fn align(off: usize, ty: Type) -> usize { - let a = ty_align(ty); - return align_up_to(off, a); -} - -fn ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ty_align(elt) - } - Vector => ty_size(ty), - _ => bug!("ty_align: unhandled type") - } -} - -fn ty_size(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - let str_tys = ty.field_types(); - str_tys.iter().fold(0, |s, t| s + ty_size(*t)) - } else { - let str_tys = ty.field_types(); - let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); - align(size, ty) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - _ => bug!("ty_size: unhandled type") - } -} - -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 { ret.extend_integer_width_to(64); } else { ret.make_indirect(ccx); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if arg.ty.kind() == Struct { - fn is_single_fp_element(tys: &[Type]) -> bool { - if tys.len() != 1 { - return false; - } - match tys[0].kind() { - Float | Double => true, - Struct => is_single_fp_element(&tys[0].field_types()), - _ => false - } - } - - if is_single_fp_element(&arg.ty.field_types()) { - match ty_size(arg.ty) { - 4 => arg.cast = Some(Type::f32(ccx)), - 8 => arg.cast = Some(Type::f64(ccx)), - _ => arg.make_indirect(ccx) - } - } else { - match ty_size(arg.ty) { - 1 => arg.cast = Some(Type::i8(ccx)), - 2 => arg.cast = Some(Type::i16(ccx)), - 4 => arg.cast = Some(Type::i32(ccx)), - 8 => arg.cast = Some(Type::i64(ccx)), - _ => arg.make_indirect(ccx) +fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>) -> bool { + match *layout { + Layout::Scalar { value: layout::F32, .. } | + Layout::Scalar { value: layout::F64, .. } => true, + Layout::Univariant { .. 
} => { + if layout.field_count() == 1 { + is_single_fp_element(ccx, layout.field(ccx, 0)) + } else { + false } } - return; + _ => false } +} - if is_reg_ty(arg.ty) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + let size = arg.layout.size(ccx); + if !arg.layout.is_aggregate() && size.bits() <= 64 { arg.extend_integer_width_to(64); - } else { - arg.make_indirect(ccx); + return; } -} -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | Pointer - | Float - | Double => ty_size(ty) <= 8, - _ => false + if is_single_fp_element(ccx, arg.layout) { + match size.bytes() { + 4 => arg.cast_to(ccx, Reg::f32()), + 8 => arg.cast_to(ccx, Reg::f64()), + _ => arg.make_indirect(ccx) + } + } else { + match size.bytes() { + 1 => arg.cast_to(ccx, Reg::i8()), + 2 => arg.cast_to(ccx, Reg::i16()), + 4 => arg.cast_to(ccx, Reg::i32()), + 8 => arg.cast_to(ccx, Reg::i64()), + _ => arg.make_indirect(ccx) + } } } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_sparc.rs b/src/librustc_trans/cabi_sparc.rs index 25fe53e7ef40f..c17901e1adebc 100644 --- a/src/librustc_trans/cabi_sparc.rs +++ b/src/librustc_trans/cabi_sparc.rs @@ -8,94 +8,40 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_upper_case_globals)] - -use libc::c_uint; use std::cmp; -use llvm; -use llvm::{Integer, Pointer, Float, Double, Vector}; -use abi::{self, align_up_to, ArgType, FnType}; +use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use type_::Type; - -fn ty_align(ty: Type) -> usize { - abi::ty_align(ty, 4) -} -fn ty_size(ty: Type) -> usize { - abi::ty_size(ty, 4) -} - -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { - let orig_offset = *offset; - let size = ty_size(arg.ty) * 8; - let mut align = ty_align(arg.ty); - +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { + let size = arg.layout.size(ccx); + let mut align = arg.layout.align(ccx).abi(); align = cmp::min(cmp::max(align, 4), 8); - *offset = align_up_to(*offset, align); - *offset += align_up_to(size, align * 8) / 8; - - if !is_reg_ty(arg.ty) { - arg.cast = Some(struct_ty(ccx, arg.ty)); - arg.pad = padding_ty(ccx, align, orig_offset); - } else { - arg.extend_integer_width_to(32); - } -} - -fn is_reg_ty(ty: Type) -> bool { - return match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - }; -} - -fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { - if ((align - 1 ) & offset) > 0 { - Some(Type::i32(ccx)) - } else { - None - } -} - -fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { - let int_ty = Type::i32(ccx); - let mut args = Vec::new(); - let mut n = size / 32; - while n > 0 { - args.push(int_ty); - n -= 1; - } - - let r = size % 32; - if r > 0 { - unsafe { - args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); + if arg.layout.is_aggregate() { + arg.cast_to(ccx, Uniform { + unit: Reg::i32(), + 
total: size + }); + if ((align - 1) & *offset) > 0 { + arg.pad_with(ccx, Reg::i32()); } + } else { + arg.extend_integer_width_to(32) } - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_int(ccx, size), false) + *offset = align_up_to(*offset, align); + *offset += align_up_to(size.bytes(), align); } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_sparc64.rs b/src/librustc_trans/cabi_sparc64.rs index e675cca33d1be..b75fa97f948ec 100644 --- a/src/librustc_trans/cabi_sparc64.rs +++ b/src/librustc_trans/cabi_sparc64.rs @@ -10,170 +10,89 @@ // FIXME: This needs an audit for correctness and completeness. -use llvm::{Integer, Pointer, Float, Double, Struct, Vector, Array}; -use abi::{self, FnType, ArgType}; +use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform}; use context::CrateContext; -use type_::Type; -fn ty_size(ty: Type) -> usize { - if ty.kind() == Vector { - bug!("ty_size: unhandled type") - } else { - abi::ty_size(ty, 8) - } -} - -fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { - fn check_array(ty: Type) -> Option<(Type, u64)> { - let len = ty.array_length() as u64; - if len == 0 { - return None - } - let elt = ty.element_type(); - - // if our element is an HFA/HVA, so are we; multiply members by our len - is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members)) - } - - fn check_struct(ty: Type) -> Option<(Type, u64)> { - let str_tys = ty.field_types(); - if str_tys.len() == 0 { - return None - } - - let mut prev_base_ty = None; - let mut members = 0; - for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) { - match (prev_base_ty, opt_homog_agg) { - // field isn't itself an HFA, so we aren't either - (_, None) => return None, +fn is_homogenous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) + -> Option { + arg.layout.homogenous_aggregate(ccx).and_then(|unit| { + let size = arg.layout.size(ccx); - // first field - store its type and number of members - (None, Some((field_ty, field_members))) => { - prev_base_ty = Some(field_ty); - members = field_members; - }, - - // 2nd or later field - give up if it's a different type; otherwise incr. members - (Some(prev_ty), Some((field_ty, field_members))) => { - if prev_ty != field_ty { - return None; - } - members += field_members; - } - } + // Ensure we have at most eight uniquely addressable members. + if size > unit.size.checked_mul(8, ccx).unwrap() { + return None; } - // Because of previous checks, we know prev_base_ty is Some(...) because - // 1. str_tys has at least one element; and - // 2. prev_base_ty was filled in (or we would've returned early) - let (base_ty, members) = (prev_base_ty.unwrap(), members); - - // Ensure there is no padding. 
- if ty_size(ty) == ty_size(base_ty) * (members as usize) { - Some((base_ty, members)) - } else { - None - } - } - - let homog_agg = match ty.kind() { - Float => Some((ty, 1)), - Double => Some((ty, 1)), - Array => check_array(ty), - Struct => check_struct(ty), - _ => None - }; + let valid_unit = match unit.kind { + RegKind::Integer => false, + RegKind::Float => true, + RegKind::Vector => size.bits() == 128 + }; - // Ensure we have at most eight uniquely addressable members - homog_agg.and_then(|(base_ty, members)| { - if members > 0 && members <= 8 { - Some((base_ty, members)) + if valid_unit { + Some(Uniform { + unit, + total: size + }) } else { None } }) } -fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { - if is_reg_ty(ret.ty) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); return; } - // don't return aggregates in registers - ret.make_indirect(ccx); - - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) { - ret.cast = Some(Type::array(&base_ty, members)); + if let Some(uniform) = is_homogenous_aggregate(ccx, ret) { + ret.cast_to(ccx, uniform); return; } - let size = ty_size(ret.ty); - if size <= 16 { - let llty = if size <= 1 { - Type::i8(ccx) - } else if size <= 2 { - Type::i16(ccx) - } else if size <= 4 { - Type::i32(ccx) - } else if size <= 8 { - Type::i64(ccx) + let size = ret.layout.size(ccx); + let bits = size.bits(); + if bits <= 128 { + let unit = if bits <= 8 { + Reg::i8() + } else if bits <= 16 { + Reg::i16() + } else if bits <= 32 { + Reg::i32() } else { - Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) + Reg::i64() }; - ret.cast = Some(llty); + + ret.cast_to(ccx, Uniform { + unit, + total: size + }); return; } + + // don't return aggregates in registers + ret.make_indirect(ccx); } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { - if is_reg_ty(arg.ty) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if !arg.layout.is_aggregate() { arg.extend_integer_width_to(64); return; } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) { - arg.cast = Some(Type::array(&base_ty, members)); + if let Some(uniform) = is_homogenous_aggregate(ccx, arg) { + arg.cast_to(ccx, uniform); return; } - arg.cast = Some(struct_ty(ccx, arg.ty)); -} - -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | Pointer - | Float - | Double => true, - _ => false - } -} - -fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec { - let long_ty = Type::i64(ccx); - let mut args = Vec::new(); - - let mut n = size / 64; - while n > 0 { - args.push(long_ty); - n -= 1; - } - - let r = size % 64; - if r > 0 { - args.push(Type::ix(ccx, r as u64)); - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_long(ccx, size), false) + let total = arg.layout.size(ccx); + arg.cast_to(ccx, Uniform { + unit: Reg::i64(), + total + }); } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { classify_ret_ty(ccx, &mut fty.ret); } diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index fea005f3d77da..9f5520dabe334 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -8,11 +8,8 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use llvm::*; -use abi::{ArgAttribute, FnType}; -use type_::Type; -use super::common::*; -use super::machine::*; +use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind}; +use common::CrateContext; #[derive(PartialEq)] pub enum Flavor { @@ -20,9 +17,11 @@ pub enum Flavor { Fastcall } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + fty: &mut FnType<'tcx>, + flavor: Flavor) { if !fty.ret.is_ignore() { - if fty.ret.ty.kind() == Struct { + if fty.ret.layout.is_aggregate() { // Returning a structure. Most often, this will use // a hidden first argument. On some platforms, though, // small structs are returned as integers. @@ -33,11 +32,12 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { let t = &ccx.sess().target.target; if t.options.is_like_osx || t.options.is_like_windows || t.options.is_like_openbsd { - match llsize_of_alloc(ccx, fty.ret.ty) { - 1 => fty.ret.cast = Some(Type::i8(ccx)), - 2 => fty.ret.cast = Some(Type::i16(ccx)), - 4 => fty.ret.cast = Some(Type::i32(ccx)), - 8 => fty.ret.cast = Some(Type::i64(ccx)), + let size = fty.ret.layout.size(ccx); + match size.bytes() { + 1 => fty.ret.cast_to(ccx, Reg::i8()), + 2 => fty.ret.cast_to(ccx, Reg::i16()), + 4 => fty.ret.cast_to(ccx, Reg::i32()), + 8 => fty.ret.cast_to(ccx, Reg::i64()), _ => fty.ret.make_indirect(ccx) } } else { @@ -50,7 +50,7 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { for arg in &mut fty.args { if arg.is_ignore() { continue; } - if arg.ty.kind() == Struct { + if arg.layout.is_aggregate() { arg.make_indirect(ccx); arg.attrs.set(ArgAttribute::ByVal); } else { @@ -73,12 +73,15 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { for arg in &mut fty.args { if arg.is_ignore() || arg.is_indirect() { continue; } - if arg.ty.kind() == Float { + // At this point we know this must be a primitive of sorts. 
+ let unit = arg.layout.homogenous_aggregate(ccx).unwrap(); + let size = arg.layout.size(ccx); + assert_eq!(unit.size, size); + if unit.kind == RegKind::Float { continue; } - let size = llbitsize_of_real(ccx, arg.ty); - let size_in_regs = (size + 31) / 32; + let size_in_regs = (size.bits() + 31) / 32; if size_in_regs == 0 { continue; @@ -90,7 +93,7 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { free_regs -= size_in_regs; - if size <= 32 && (arg.ty.kind() == Pointer || arg.ty.kind() == Integer) { + if size.bits() <= 32 && unit.kind == RegKind::Integer { arg.attrs.set(ArgAttribute::InReg); } diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 7f2fdbf000b65..cbe170d85834c 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -11,388 +11,250 @@ // The classification code for the x86_64 ABI is taken from the clay language // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp -#![allow(non_upper_case_globals)] -use self::RegClass::*; - -use llvm::{Integer, Pointer, Float, Double}; -use llvm::{Struct, Array, Vector}; -use abi::{self, ArgType, ArgAttribute, FnType}; +use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind}; use context::CrateContext; -use type_::Type; - -#[derive(Clone, Copy, PartialEq)] -enum RegClass { - NoClass, - Int, - SSEFs, - SSEFv, - SSEDs, - SSEDv, - SSEInt(/* bitwidth */ u64), - /// Data that can appear in the upper half of an SSE register. - SSEUp, - X87, - X87Up, - ComplexX87, - Memory -} - -trait TypeMethods { - fn is_reg_ty(&self) -> bool; -} - -impl TypeMethods for Type { - fn is_reg_ty(&self) -> bool { - match self.kind() { - Integer | Pointer | Float | Double => true, - _ => false - } - } -} - -impl RegClass { - fn is_sse(&self) -> bool { - match *self { - SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true, - _ => false - } - } -} - -trait ClassList { - fn is_pass_byval(&self) -> bool; - fn is_ret_bysret(&self) -> bool; -} - -impl ClassList for [RegClass] { - fn is_pass_byval(&self) -> bool { - if self.is_empty() { return false; } - - let class = self[0]; - class == Memory - || class == X87 - || class == ComplexX87 - } - fn is_ret_bysret(&self) -> bool { - if self.is_empty() { return false; } +use rustc::ty::layout::{self, Layout, TyLayout, Size}; - self[0] == Memory - } +#[derive(Clone, Copy, PartialEq, Debug)] +enum Class { + None, + Int, + Sse, + SseUp } -fn classify_ty(ty: Type) -> Vec { - fn align(off: usize, ty: Type) -> usize { - let a = ty_align(ty); - return (off + a - 1) / a * a; - } - - fn ty_align(ty: Type) -> usize { - abi::ty_align(ty, 8) - } - - fn ty_size(ty: Type) -> usize { - abi::ty_size(ty, 8) - } - - fn all_mem(cls: &mut [RegClass]) { - for elt in cls { - *elt = Memory; - } - } - - fn unify(cls: &mut [RegClass], - i: usize, - newv: RegClass) { - if cls[i] == newv { return } +#[derive(Clone, Copy, Debug)] +struct Memory; - let to_write = match (cls[i], newv) { - (NoClass, _) => newv, - (_, NoClass) => return, +// Currently supported vector size (AVX). 
+const LARGEST_VECTOR_SIZE: usize = 256; +const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64; - (Memory, _) | - (_, Memory) => Memory, +fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) + -> Result<[Class; MAX_EIGHTBYTES], Memory> { + fn unify(cls: &mut [Class], + off: u64, + c: Class) { + let i = (off / 8) as usize; + let to_write = match (cls[i], c) { + (Class::None, _) => c, + (_, Class::None) => return, - (Int, _) | - (_, Int) => Int, + (Class::Int, _) | + (_, Class::Int) => Class::Int, - (X87, _) | - (X87Up, _) | - (ComplexX87, _) | - (_, X87) | - (_, X87Up) | - (_, ComplexX87) => Memory, + (Class::Sse, _) | + (_, Class::Sse) => Class::Sse, - (SSEFv, SSEUp) | - (SSEFs, SSEUp) | - (SSEDv, SSEUp) | - (SSEDs, SSEUp) | - (SSEInt(_), SSEUp) => return, - - (..) => newv + (Class::SseUp, Class::SseUp) => Class::SseUp }; cls[i] = to_write; } - fn classify_struct(tys: &[Type], - cls: &mut [RegClass], - i: usize, - off: usize, - packed: bool) { - let mut field_off = off; - for ty in tys { - if !packed { - field_off = align(field_off, *ty); + fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>, + cls: &mut [Class], + off: u64) + -> Result<(), Memory> { + if off % layout.align(ccx).abi() != 0 { + if layout.size(ccx).bytes() > 0 { + return Err(Memory); } - classify(*ty, cls, i, field_off); - field_off += ty_size(*ty); + return Ok(()); } - } - fn classify(ty: Type, - cls: &mut [RegClass], ix: usize, - off: usize) { - let t_align = ty_align(ty); - let t_size = ty_size(ty); - - let misalign = off % t_align; - if misalign != 0 { - let mut i = off / 8; - let e = (off + t_size + 7) / 8; - while i < e { - unify(cls, ix + i, Memory); - i += 1; + match *layout { + Layout::Scalar { value, .. } | + Layout::RawNullablePointer { value, .. } => { + let reg = match value { + layout::Int(_) | + layout::Pointer => Class::Int, + layout::F32 | + layout::F64 => Class::Sse + }; + unify(cls, off, reg); } - return; - } - match ty.kind() { - Integer | - Pointer => { - unify(cls, ix + off / 8, Int); + Layout::CEnum { .. } => { + unify(cls, off, Class::Int); } - Float => { - if off % 8 == 4 { - unify(cls, ix + off / 8, SSEFv); - } else { - unify(cls, ix + off / 8, SSEFs); + + Layout::Vector { element, count } => { + unify(cls, off, Class::Sse); + + // everything after the first one is the upper + // half of a register. + let eltsz = element.size(ccx).bytes(); + for i in 1..count { + unify(cls, off + i * eltsz, Class::SseUp); } } - Double => { - unify(cls, ix + off / 8, SSEDs); - } - Struct => { - classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed()); - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - let mut i = 0; - while i < len { - classify(elt, cls, ix, off + i * eltsz); - i += 1; + + Layout::Array { count, .. } => { + if count > 0 { + let elt = layout.field(ccx, 0); + let eltsz = elt.size(ccx).bytes(); + for i in 0..count { + classify(ccx, elt, cls, off + i * eltsz)?; + } } } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - let mut reg = match elt.kind() { - Integer => SSEInt(elt.int_width()), - Float => SSEFv, - Double => SSEDv, - _ => bug!("classify: unhandled vector element type") - }; - let mut i = 0; - while i < len { - unify(cls, ix + (off + i * eltsz) / 8, reg); + Layout::Univariant { ref variant, .. 
} => { + for i in 0..layout.field_count() { + let field_off = off + variant.offsets[i].bytes(); + classify(ccx, layout.field(ccx, i), cls, field_off)?; + } + } - // everything after the first one is the upper - // half of a register. - reg = SSEUp; - i += 1; + Layout::UntaggedUnion { .. } => { + for i in 0..layout.field_count() { + classify(ccx, layout.field(ccx, i), cls, off)?; } } - _ => bug!("classify: unhandled type") + + Layout::FatPointer { .. } | + Layout::General { .. } | + Layout::StructWrappedNullablePointer { .. } => return Err(Memory) } + + Ok(()) + } + + let n = ((arg.layout.size(ccx).bytes() + 7) / 8) as usize; + if n > MAX_EIGHTBYTES { + return Err(Memory); } - fn fixup(ty: Type, cls: &mut [RegClass]) { + let mut cls = [Class::None; MAX_EIGHTBYTES]; + classify(ccx, arg.layout, &mut cls, 0)?; + if n > 2 { + if cls[0] != Class::Sse { + return Err(Memory); + } + if cls[1..n].iter().any(|&c| c != Class::SseUp) { + return Err(Memory); + } + } else { let mut i = 0; - let ty_kind = ty.kind(); - let e = cls.len(); - if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) { - if cls[i].is_sse() { + while i < n { + if cls[i] == Class::SseUp { + cls[i] = Class::Sse; + } else if cls[i] == Class::Sse { i += 1; - while i < e { - if cls[i] != SSEUp { - all_mem(cls); - return; - } - i += 1; - } + while i != n && cls[i] == Class::SseUp { i += 1; } } else { - all_mem(cls); - return - } - } else { - while i < e { - if cls[i] == Memory { - all_mem(cls); - return; - } - if cls[i] == X87Up { - // for darwin - // cls[i] = SSEDs; - all_mem(cls); - return; - } - if cls[i] == SSEUp { - cls[i] = SSEDv; - } else if cls[i].is_sse() { - i += 1; - while i != e && cls[i] == SSEUp { i += 1; } - } else if cls[i] == X87 { - i += 1; - while i != e && cls[i] == X87Up { i += 1; } - } else { - i += 1; - } + i += 1; } } } - let words = (ty_size(ty) + 7) / 8; - let mut cls = vec![NoClass; words]; - if words > 4 { - all_mem(&mut cls); - return cls; - } - classify(ty, &mut cls, 0, 0); - fixup(ty, &mut cls); - return cls; + Ok(cls) } -fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type { - fn llvec_len(cls: &[RegClass]) -> usize { - let mut len = 1; - for c in cls { - if *c != SSEUp { - break; - } - len += 1; - } - return len; +fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { + if *i >= cls.len() { + return None; } - let mut tys = Vec::new(); - let mut i = 0; - let e = cls.len(); - while i < e { - match cls[i] { - Int => { - tys.push(Type::i64(ccx)); - } - SSEFv | SSEDv | SSEInt(_) => { - let (elts_per_word, elt_ty) = match cls[i] { - SSEFv => (2, Type::f32(ccx)), - SSEDv => (1, Type::f64(ccx)), - SSEInt(bits) => { - assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64, - "llreg_ty: unsupported SSEInt width {}", bits); - (64 / bits, Type::ix(ccx, bits)) + match cls[*i] { + Class::None => None, + Class::Int => { + *i += 1; + Some(match size { + 1 => Reg::i8(), + 2 => Reg::i16(), + 3 | + 4 => Reg::i32(), + _ => Reg::i64() + }) + } + Class::Sse => { + let vec_len = 1 + cls[*i+1..].iter().take_while(|&&c| c == Class::SseUp).count(); + *i += vec_len; + Some(match size { + 4 => Reg::f32(), + 8 => Reg::f64(), + _ => { + Reg { + kind: RegKind::Vector, + size: Size::from_bytes(vec_len as u64 * 8) } - _ => bug!(), - }; - let vec_len = llvec_len(&cls[i + 1..]); - let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word); - tys.push(vec_ty); - i += vec_len; - continue; - } - SSEFs => { - tys.push(Type::f32(ccx)); - } - SSEDs => { - tys.push(Type::f64(ccx)); - } - 
_ => bug!("llregtype: unhandled class") + } + }) } - i += 1; + c => bug!("reg_component: unhandled class {:?}", c) } - if tys.len() == 1 && tys[0].kind() == Vector { - // if the type contains only a vector, pass it as that vector. - tys[0] +} + +fn cast_target(cls: &[Class], size: u64) -> CastTarget { + let mut i = 0; + let lo = reg_component(cls, &mut i, size).unwrap(); + let offset = i as u64 * 8; + let target = if size <= offset { + CastTarget::from(lo) } else { - Type::struct_(ccx, &tys, false) - } + let hi = reg_component(cls, &mut i, size - offset).unwrap(); + CastTarget::Pair(lo, hi) + }; + assert_eq!(reg_component(cls, &mut i, 0), None); + target } -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { - fn x86_64_ty(ccx: &CrateContext, - arg: &mut ArgType, - is_mem_cls: F, - ind_attr: Option) - where F: FnOnce(&[RegClass]) -> bool - { - if !arg.ty.is_reg_ty() { - let cls = classify_ty(arg.ty); - if is_mem_cls(&cls) { - arg.make_indirect(ccx); - if let Some(attr) = ind_attr { - arg.attrs.set(attr); +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9 + let mut sse_regs = 8; // XMM0-7 + + let mut x86_64_ty = |arg: &mut ArgType<'tcx>, is_arg: bool| { + let cls = classify_arg(ccx, arg); + + let mut needed_int = 0; + let mut needed_sse = 0; + let in_mem = match cls { + Err(Memory) => true, + Ok(ref cls) if is_arg => { + for &c in cls { + match c { + Class::Int => needed_int += 1, + Class::Sse => needed_sse += 1, + _ => {} + } } - } else { - arg.cast = Some(llreg_ty(ccx, &cls)); + arg.layout.is_aggregate() && + (int_regs < needed_int || sse_regs < needed_sse) } - } else { - arg.extend_integer_width_to(32); - } - } + Ok(_) => false + }; - let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9 - let mut sse_regs = 8; // XMM0-7 + if in_mem { + // `sret` / `byval` parameter thus one less integer register available + int_regs -= 1; - if !fty.ret.is_ignore() { - x86_64_ty(ccx, &mut fty.ret, |cls| { - if cls.is_ret_bysret() { - // `sret` parameter thus one less register available - int_regs -= 1; - true + arg.make_indirect(ccx); + if is_arg { + arg.attrs.set(ArgAttribute::ByVal); + } + } else { + // split into sized chunks passed individually + int_regs -= needed_int; + sse_regs -= needed_sse; + + if arg.layout.is_aggregate() { + let size = arg.layout.size(ccx).bytes(); + arg.cast_to(ccx, cast_target(cls.as_ref().unwrap(), size)) } else { - false + arg.extend_integer_width_to(32); } - }, None); + } + }; + + if !fty.ret.is_ignore() { + x86_64_ty(&mut fty.ret, false); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - x86_64_ty(ccx, arg, |cls| { - let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize; - let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize; - let in_mem = cls.is_pass_byval() || - int_regs < needed_int || - sse_regs < needed_sse; - if in_mem { - // `byval` parameter thus one less integer register available - int_regs -= 1; - } else { - // split into sized chunks passed individually - int_regs -= needed_int; - sse_regs -= needed_sse; - } - in_mem - }, Some(ArgAttribute::ByVal)); - - // An integer, pointer, double or float parameter - // thus the above closure passed to `x86_64_ty` won't - // get called. 
- match arg.ty.kind() { - Integer | Pointer => int_regs -= 1, - Double | Float => sse_regs -= 1, - _ => {} - } + x86_64_ty(arg, true); } } diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index a849f38247380..39e728d4e4f9b 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -8,30 +8,33 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::*; -use super::common::*; -use super::machine::*; -use abi::{ArgType, FnType}; -use type_::Type; +use abi::{ArgType, FnType, LayoutExt, Reg}; +use common::CrateContext; + +use rustc::ty::layout::Layout; // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx -pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { - let fixup = |a: &mut ArgType| { - match a.ty.kind() { - Struct => match llsize_of_alloc(ccx, a.ty) { - 1 => a.cast = Some(Type::i8(ccx)), - 2 => a.cast = Some(Type::i16(ccx)), - 4 => a.cast = Some(Type::i32(ccx)), - 8 => a.cast = Some(Type::i64(ccx)), - _ => a.make_indirect(ccx) - }, - Integer => match llsize_of_alloc(ccx, a.ty) { - 1 ... 8 => a.extend_integer_width_to(32), - 16 => a.make_indirect(ccx), - _ => bug!(), - }, - _ => (), +pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let fixup = |a: &mut ArgType<'tcx>| { + let size = a.layout.size(ccx); + if a.layout.is_aggregate() { + match size.bits() { + 8 => a.cast_to(ccx, Reg::i8()), + 16 => a.cast_to(ccx, Reg::i16()), + 32 => a.cast_to(ccx, Reg::i32()), + 64 => a.cast_to(ccx, Reg::i64()), + _ => a.make_indirect(ccx) + }; + } else { + if let Layout::Vector { .. } = *a.layout { + // FIXME(eddyb) there should be a size cap here + // (probably what clang calls "illegal vectors"). + } else if size.bytes() > 8 { + a.make_indirect(ccx); + } else { + a.extend_integer_width_to(32); + } } }; diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index a0906bb02f5a3..5d58c93538922 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -27,7 +27,7 @@ use monomorphize; use type_::Type; use value::Value; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::Layout; +use rustc::ty::layout::{Layout, LayoutTyper}; use rustc::ty::subst::{Subst, Substs}; use rustc::hir; @@ -63,7 +63,7 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - Layout::UntaggedUnion { .. } | Layout::RawNullablePointer { .. } | Layout::StructWrappedNullablePointer { .. } => { - !layout.is_unsized() && layout.size(&ccx.tcx().data_layout).bytes() == 0 + !layout.is_unsized() && layout.size(ccx).bytes() == 0 } } } @@ -125,10 +125,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) /// Identify types which have size zero at runtime. 
pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - use machine::llsize_of_alloc; - use type_of::sizing_type_of; - let llty = sizing_type_of(ccx, ty); - llsize_of_alloc(ccx, llty) == 0 + let layout = ccx.layout_of(ty); + !layout.is_unsized() && layout.size(ccx).bytes() == 0 } /* diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index daf1a1ba95f9a..6b6fa538dc03b 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -255,7 +255,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ccx.statics_to_rauw().borrow_mut().push((g, new_g)); new_g }; - llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty)); + llvm::LLVMSetAlignment(g, ccx.align_of(ty)); llvm::LLVMSetInitializer(g, v); // As an optimization, all shared statics which do not have interior diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index afb94f546abe8..98fbb64fd5540 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -28,6 +28,7 @@ use type_::Type; use rustc_data_structures::base_n; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{LayoutTyper, TyLayout}; use session::config::NoDebugInfo; use session::Session; use session::config; @@ -828,18 +829,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { TypeOfDepthLock(self.local()) } - pub fn layout_of(&self, ty: Ty<'tcx>) -> &'tcx ty::layout::Layout { - self.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| { - ty.layout(&infcx).unwrap_or_else(|e| { - match e { - ty::layout::LayoutError::SizeOverflow(_) => - self.sess().fatal(&e.to_string()), - _ => bug!("failed to get layout for `{}`: {}", ty, e) - } - }) - }) - } - pub fn check_overflow(&self) -> bool { self.shared.check_overflow } @@ -951,6 +940,54 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { } } +impl<'a, 'tcx> ty::layout::HasDataLayout for &'a SharedCrateContext<'a, 'tcx> { + fn data_layout(&self) -> &ty::layout::TargetDataLayout { + &self.tcx.data_layout + } +} + +impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a SharedCrateContext<'a, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { + self.tcx + } +} + +impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CrateContext<'a, 'tcx> { + fn data_layout(&self) -> &ty::layout::TargetDataLayout { + &self.shared.tcx.data_layout + } +} + +impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { + self.shared.tcx + } +} + +impl<'a, 'tcx> LayoutTyper<'tcx> for &'a SharedCrateContext<'a, 'tcx> { + type TyLayout = TyLayout<'tcx>; + + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + self.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| { + infcx.layout_of(ty).unwrap_or_else(|e| { + match e { + ty::layout::LayoutError::SizeOverflow(_) => + self.sess().fatal(&e.to_string()), + _ => bug!("failed to get layout for `{}`: {}", ty, e) + } + }) + }) + } +} + +impl<'a, 'tcx> LayoutTyper<'tcx> for &'a CrateContext<'a, 'tcx> { + type TyLayout = TyLayout<'tcx>; + + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + self.shared.layout_of(ty) + } +} + pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>); impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> { diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 049178a2575f3..ccb693aa41f4c 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -35,7 +35,8 @@ use 
rustc_data_structures::ToHex; use {type_of, machine, monomorphize}; use common::{self, CrateContext}; use type_::Type; -use rustc::ty::{self, AdtKind, Ty, layout}; +use rustc::ty::{self, AdtKind, Ty}; +use rustc::ty::layout::{self, LayoutTyper}; use session::config; use util::nodemap::FxHashMap; use util::common::path2cstr; @@ -900,7 +901,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { let offsets = match *layout { layout::Univariant { ref variant, .. } => &variant.offsets, layout::Vector { element, count } => { - let element_size = element.size(&cx.tcx().data_layout).bytes(); + let element_size = element.size(cx).bytes(); tmp = (0..count). map(|i| layout::Size::from_bytes(i*element_size)) .collect::>(); @@ -1564,7 +1565,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_llvm_type, EnumMDF(EnumMemberDescriptionFactory { enum_type: enum_type, - type_rep: type_rep, + type_rep: type_rep.layout, discriminant_type_metadata: discriminant_type_metadata, containing_scope: containing_scope, file_metadata: file_metadata, @@ -1772,7 +1773,7 @@ pub fn create_global_var_metadata(cx: &CrateContext, let var_name = CString::new(var_name).unwrap(); let linkage_name = CString::new(linkage_name).unwrap(); - let global_align = type_of::align_of(cx, variable_type); + let global_align = cx.align_of(variable_type); unsafe { llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx), diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 8e86b50b3f7dd..1b7cf26853bc1 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -449,7 +449,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, LocalVariable | CapturedVariable => (0, DW_TAG_auto_variable) }; - let align = ::type_of::align_of(cx, variable_type); + let align = cx.align_of(variable_type); let name = CString::new(variable_name.as_str().as_bytes()).unwrap(); match (variable_access, &[][..]) { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 41a9ab2842dcd..59876a7f2a201 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -18,11 +18,10 @@ use llvm; use llvm::{ValueRef}; use rustc::traits; use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::layout::LayoutTyper; use common::*; -use machine::*; use meth; use monomorphize; -use type_of::{sizing_type_of, align_of}; use value::Value; use builder::Builder; @@ -49,7 +48,7 @@ pub fn needs_drop_glue<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx> if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) { scx.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| { let layout = t.layout(&infcx).unwrap(); - if layout.size(&scx.tcx().data_layout).bytes() == 0 { + if layout.size(scx).bytes() == 0 { // `Box` does not allocate. 
false } else { @@ -69,9 +68,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); if bcx.ccx.shared().type_is_sized(t) { - let sizing_type = sizing_type_of(bcx.ccx, t); - let size = llsize_of_alloc(bcx.ccx, sizing_type); - let align = align_of(bcx.ccx, t); + let size = bcx.ccx.size_of(t); + let align = bcx.ccx.align_of(t); debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}", t, Value(info), size, align); let size = C_uint(bcx.ccx, size); @@ -82,9 +80,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf ty::TyAdt(def, substs) => { let ccx = bcx.ccx; // First get the size of all statically known fields. - // Don't use type_of::sizing_type_of because that expects t to be sized, - // and it also rounds up to alignment, which we want to avoid, - // as the unsized field's alignment could be smaller. + // Don't use size_of because it also rounds up to alignment, which we + // want to avoid, as the unsized field's alignment could be smaller. assert!(!t.is_simd()); let layout = ccx.layout_of(t); debug!("DST {} layout: {:?}", t, layout); @@ -154,14 +151,11 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info)) } ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(bcx.tcx()); + let unit = t.sequence_element_type(bcx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. - let llunit_ty = sizing_type_of(bcx.ccx, unit_ty); - let unit_align = llalign_of_min(bcx.ccx, llunit_ty); - let unit_size = llsize_of_alloc(bcx.ccx, llunit_ty); - (bcx.mul(info, C_uint(bcx.ccx, unit_size)), - C_uint(bcx.ccx, unit_align)) + (bcx.mul(info, C_uint(bcx.ccx, bcx.ccx.size_of(unit))), + C_uint(bcx.ccx, bcx.ccx.align_of(unit))) } _ => bug!("Unexpected unsized type, found {}", t) } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 762bf8592ffcc..5e7d612d17f82 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -151,7 +151,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "min_align_of" => { let tp_ty = substs.type_at(0); - C_uint(ccx, type_of::align_of(ccx, tp_ty)) + C_uint(ccx, ccx.align_of(tp_ty)) } "min_align_of_val" => { let tp_ty = substs.type_at(0); @@ -160,7 +160,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign } else { - C_uint(ccx, type_of::align_of(ccx, tp_ty)) + C_uint(ccx, ccx.align_of(tp_ty)) } } "pref_align_of" => { @@ -234,7 +234,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } let load = bcx.volatile_load(ptr); unsafe { - llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty)); + llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty)); } to_immediate(bcx, load, tp_ty) }, @@ -252,7 +252,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to()); let store = bcx.volatile_store(val, ptr); unsafe { - llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty)); + llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty)); } } C_nil(ccx) @@ -634,7 +634,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { if let Some(ty) = fn_ty.ret.cast { let ptr = 
bcx.pointercast(llresult, ty.ptr_to()); - bcx.store(llval, ptr, Some(type_of::align_of(ccx, ret_ty))); + bcx.store(llval, ptr, Some(ccx.align_of(ret_ty))); } else { store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty); } @@ -651,7 +651,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, -> ValueRef { let ccx = bcx.ccx; let lltp_ty = type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); + let align = C_i32(ccx, ccx.align_of(tp_ty) as i32); let size = machine::llsize_of(ccx, lltp_ty); let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); @@ -685,7 +685,7 @@ fn memset_intrinsic<'a, 'tcx>( count: ValueRef ) -> ValueRef { let ccx = bcx.ccx; - let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); + let align = C_i32(ccx, ccx.align_of(ty) as i32); let lltp_ty = type_of::type_of(ccx, ty); let size = machine::llsize_of(ccx, lltp_ty); let dst = bcx.pointercast(dst, Type::i8p(ccx)); diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 75ab407614050..f5f924178589a 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -17,7 +17,6 @@ use consts; use machine; use monomorphize; use type_::Type; -use type_of::*; use value::Value; use rustc::ty; @@ -80,14 +79,10 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Not in the cache. Build it. let nullptr = C_null(Type::nil(ccx).ptr_to()); - let size_ty = sizing_type_of(ccx, ty); - let size = machine::llsize_of_alloc(ccx, size_ty); - let align = align_of(ccx, ty); - let mut components: Vec<_> = [ callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.shared(), ty)), - C_uint(ccx, size), - C_uint(ccx, align) + C_uint(ccx, ccx.size_of(ty)), + C_uint(ccx, ccx.align_of(ty)) ].iter().cloned().collect(); if let Some(trait_ref) = trait_ref { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index d69f31a45048d..caec4789eddce 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -12,7 +12,8 @@ use llvm::{self, ValueRef, BasicBlockRef}; use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err}; use rustc::middle::lang_items; use rustc::middle::const_val::ConstInt; -use rustc::ty::{self, layout, TypeFoldable}; +use rustc::ty::{self, TypeFoldable}; +use rustc::ty::layout::{self, LayoutTyper}; use rustc::mir; use abi::{Abi, FnType, ArgType}; use base::{self, Lifetime}; @@ -24,8 +25,8 @@ use consts; use machine::llalign_of_min; use meth; use monomorphize; +use type_of; use tvec; -use type_of::{self, align_of}; use type_::Type; use rustc_data_structures::indexed_vec::IndexVec; @@ -177,7 +178,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) 
=> { - let llscratch = bcx.alloca(ret.original_ty, "ret"); + let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret"); self.store_operand(&bcx, llscratch, None, op); llscratch } @@ -189,7 +190,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let load = bcx.load( bcx.pointercast(llslot, cast_ty.ptr_to()), - Some(llalign_of_min(bcx.ccx, ret.ty))); + Some(ret.layout.align(bcx.ccx).abi() as u32)); load } else { let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); @@ -515,7 +516,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { (llargs[0], &llargs[1..]) } ReturnDest::Nothing => { - (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..]) + (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..]) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => (dst, &llargs[..]), @@ -534,7 +535,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { val: Ref(dst, Alignment::AbiAligned), ty: sig.output(), }; - self.store_return(&bcx, ret_dest, fn_ty.ret, op); + self.store_return(&bcx, ret_dest, &fn_ty.ret, op); } if let Some((_, target)) = *destination { @@ -573,7 +574,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { val: Immediate(invokeret), ty: sig.output(), }; - self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); + self.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op); } } else { let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); @@ -583,7 +584,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { val: Immediate(llret), ty: sig.output(), }; - self.store_return(&bcx, ret_dest, fn_ty.ret, op); + self.store_return(&bcx, ret_dest, &fn_ty.ret, op); funclet_br(self, bcx, target); } else { bcx.unreachable(); @@ -597,7 +598,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx: &Builder<'a, 'tcx>, op: OperandRef<'tcx>, llargs: &mut Vec<ValueRef>, - fn_ty: &FnType, + fn_ty: &FnType<'tcx>, next_idx: &mut usize, llfn: &mut Option<ValueRef>, def: &Option<ty::InstanceDef<'tcx>>) { @@ -640,7 +641,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (mut llval, align, by_ref) = match op.val { Immediate(_) | Pair(..) => { if arg.is_indirect() || arg.cast.is_some() { - let llscratch = bcx.alloca(arg.original_ty, "arg"); + let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg"); self.store_operand(bcx, llscratch, None, op); (llscratch, Alignment::AbiAligned, true) } else { @@ -652,7 +653,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. - let llscratch = bcx.alloca(arg.original_ty, "arg"); + let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg"); base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1)); (llscratch, Alignment::AbiAligned, true) } @@ -661,13 +662,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. - if arg.original_ty == Type::i1(bcx.ccx) { + if arg.layout.ty == bcx.tcx().types.bool { // We store bools as i8 so we need to truncate to i1.
llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); - llval = bcx.trunc(llval, arg.original_ty); + llval = bcx.trunc(llval, Type::i1(bcx.ccx)); } else if let Some(ty) = arg.cast { llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()), - align.min_with(llalign_of_min(bcx.ccx, arg.ty))); + align.min_with(arg.layout.align(bcx.ccx).abi() as u32)); } else { llval = bcx.load(llval, align.to_align()); } @@ -680,7 +681,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>, llargs: &mut Vec<ValueRef>, - fn_ty: &FnType, + fn_ty: &FnType<'tcx>, next_idx: &mut usize, llfn: &mut Option<ValueRef>, def: &Option<ty::InstanceDef<'tcx>>) { @@ -910,7 +911,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); let in_type = val.ty; let out_type = dst.ty.to_ty(bcx.tcx()); - let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type)); + let llalign = cmp::min(bcx.ccx.align_of(in_type), bcx.ccx.align_of(out_type)); self.store_operand(bcx, cast_ptr, Some(llalign), val); } @@ -919,7 +920,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn store_return(&mut self, bcx: &Builder<'a, 'tcx>, dest: ReturnDest, - ret_ty: ArgType, + ret_ty: &ArgType<'tcx>, op: OperandRef<'tcx>) { use self::ReturnDest::*; diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index dbd928194c032..4d5b691c86ebb 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,7 +18,8 @@ use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; use rustc::mir; use rustc::mir::tcx::LvalueTy; -use rustc::ty::{self, layout, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::layout::{self, LayoutTyper}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::{Kind, Substs, Subst}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; @@ -148,7 +149,7 @@ impl<'tcx> Const<'tcx> { } else { // Otherwise, or if the value is not immediate, we create // a constant LLVM global and cast its address if necessary. - let align = type_of::align_of(ccx, self.ty); + let align = ccx.align_of(self.ty); let ptr = consts::addr_of(ccx, self.llval, align, "const"); OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned) }; @@ -717,7 +718,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { Base::Value(llval) => { // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug) let align = if self.ccx.shared().type_is_sized(ty) { - type_of::align_of(self.ccx, ty) + self.ccx.align_of(ty) } else { self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign }; @@ -979,7 +980,6 @@ fn trans_const<'a, 'tcx>( vals: &[ValueRef] ) -> ValueRef { let l = ccx.layout_of(t); - let dl = &ccx.tcx().data_layout; let variant_index = match *kind { mir::AggregateKind::Adt(_, index, _, _) => index, _ => 0, }; @@ -1002,7 +1002,7 @@ fn trans_const<'a, 'tcx>( let mut vals_with_discr = vec![lldiscr]; vals_with_discr.extend_from_slice(vals); let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); - let needed_padding = l.size(dl).bytes() - variant.stride().bytes(); + let needed_padding = l.size(ccx).bytes() - variant.stride().bytes(); if needed_padding > 0 { contents.push(padding(ccx, needed_padding)); } @@ -1022,25 +1022,20 @@ fn trans_const<'a, 'tcx>( C_vector(vals) } layout::RawNullablePointer { nndiscr, ..
} => { - let nnty = adt::compute_fields(ccx, t, nndiscr as usize, false)[0]; if variant_index as u64 == nndiscr { assert_eq!(vals.len(), 1); vals[0] } else { - C_null(type_of::sizing_type_of(ccx, nnty)) + C_null(type_of::type_of(ccx, t)) } } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { if variant_index as u64 == nndiscr { C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) } else { - let fields = adt::compute_fields(ccx, t, nndiscr as usize, false); - let vals = fields.iter().map(|&ty| { - // Always use null even if it's not the `discrfield`th - // field; see #8506. - C_null(type_of::sizing_type_of(ccx, ty)) - }).collect::<Vec<_>>(); - C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false) + // Always use null even if it's not the `discrfield`th + // field; see #8506. + C_null(type_of::type_of(ccx, t)) } } _ => bug!("trans_const: cannot handle type {} represented as {:#?}", t, l) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index dd8c1d0e1f031..fc889604ab88e 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -9,7 +9,8 @@ // except according to those terms. use llvm::ValueRef; -use rustc::ty::{self, layout, Ty, TypeFoldable}; +use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::layout::{self, LayoutTyper}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 21bbbea77d442..c8d15d28708f4 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -11,7 +11,8 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; -use rustc::ty::{self, layout}; +use rustc::ty; +use rustc::ty::layout::{self, LayoutTyper}; use rustc::mir::{self, Mir}; use rustc::mir::tcx::LvalueTy; use rustc::ty::subst::Substs; @@ -52,7 +53,7 @@ pub struct MirContext<'a, 'tcx:'a> { ccx: &'a CrateContext<'a, 'tcx>, - fn_ty: FnType, + fn_ty: FnType<'tcx>, /// When unwinding is initiated, we have to store this personality /// value somewhere so that we can load it and re-use it in the @@ -454,6 +455,23 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, assert_eq!((meta.cast, meta.pad), (None, None)); let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; + + // FIXME(eddyb) As we can't perfectly represent the data and/or + // vtable pointer in a fat pointer in Rust's typesystem, and + // because we split fat pointers into two ArgType's, they're + // not the right type so we have to cast them for now.
+ let pointee = match arg_ty.sty { + ty::TyRef(_, ty::TypeAndMut{ty, ..}) | + ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty, + ty::TyAdt(def, _) if def.is_box() => arg_ty.boxed_ty(), + _ => bug!() + }; + let data_llty = type_of::in_memory_type_of(bcx.ccx, pointee); + let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee); + + let llarg = bcx.pointercast(llarg, data_llty.ptr_to()); + let llmeta = bcx.pointercast(llmeta, meta_llty); + OperandValue::Pair(llarg, llmeta) } else { OperandValue::Immediate(llarg) diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index da24c03fdc2a0..771a88238b2b7 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,7 +10,7 @@ use llvm::ValueRef; use rustc::ty::{self, Ty}; -use rustc::ty::layout::Layout; +use rustc::ty::layout::{Layout, LayoutTyper}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index d487aa6cd5be6..8f7cb914c4735 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::Layout; +use rustc::ty::layout::{Layout, LayoutTyper}; use rustc::mir::tcx::LvalueTy; use rustc::mir; use middle::lang_items::ExchangeMallocFnLangItem; @@ -438,7 +438,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let llty = type_of::type_of(bcx.ccx, content_ty); let llsize = machine::llsize_of(bcx.ccx, llty); - let align = type_of::align_of(bcx.ccx, content_ty); + let align = bcx.ccx.align_of(content_ty); let llalign = C_uint(bcx.ccx, align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index a5722e6e520d0..d4ab6b0782855 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -13,127 +13,12 @@ use adt; use common::*; use machine; use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::layout::LayoutTyper; use trans_item::DefPathBasedNames; use type_::Type; use syntax::ast; - -// A "sizing type" is an LLVM type, the size and alignment of which are -// guaranteed to be equivalent to what you would get out of `type_of()`. It's -// useful because: -// -// (1) It may be cheaper to compute the sizing type than the full type if all -// you're interested in is the size and/or alignment; -// -// (2) It won't make any recursive calls to determine the structure of the -// type behind pointers. This can help prevent infinite loops for -// recursive types. For example, enum types rely on this behavior. 
- -pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - if let Some(t) = cx.llsizingtypes().borrow().get(&t).cloned() { - return t; - } - - debug!("sizing_type_of {:?}", t); - let _recursion_lock = cx.enter_type_of(t); - - let ptr_sizing_ty = |ty: Ty<'tcx>| { - if cx.shared().type_is_sized(ty) { - Type::i8p(cx) - } else { - Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false) - } - }; - let llsizingty = match t.sty { - _ if !cx.shared().type_is_sized(t) => { - Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false) - } - - ty::TyBool => Type::bool(cx), - ty::TyChar => Type::char(cx), - ty::TyInt(t) => Type::int_from_ty(cx, t), - ty::TyUint(t) => Type::uint_from_ty(cx, t), - ty::TyFloat(t) => Type::float_from_ty(cx, t), - ty::TyNever => Type::nil(cx), - - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - ptr_sizing_ty(ty) - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_sizing_ty(t.boxed_ty()) - } - - ty::TyFnDef(..) => Type::nil(cx), - ty::TyFnPtr(_) => Type::i8p(cx), - - ty::TyArray(ty, size) => { - let llty = sizing_type_of(cx, ty); - let size = size as u64; - Type::array(&llty, size) - } - - ty::TyTuple(ref tys, _) if tys.is_empty() => { - Type::nil(cx) - } - - ty::TyAdt(..) if t.is_simd() => { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = type_of(cx, e); - let n = t.simd_size(cx.tcx()) as u64; - Type::vector(&llet, n) - } - - ty::TyTuple(..) | ty::TyAdt(..) | ty::TyClosure(..) => { - adt::sizing_type_of(cx, t, false) - } - - ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) | - ty::TyAnon(..) | ty::TyError => { - bug!("fictitious type {:?} in sizing_type_of()", t) - } - ty::TySlice(_) | ty::TyDynamic(..) | ty::TyStr => bug!() - }; - - debug!("--> mapped t={:?} to llsizingty={:?}", t, llsizingty); - - cx.llsizingtypes().borrow_mut().insert(t, llsizingty); - - // FIXME(eddyb) Temporary sanity check for ty::layout. - let layout = cx.layout_of(t); - if !cx.shared().type_is_sized(t) { - if !layout.is_unsized() { - bug!("layout should be unsized for type `{}` / {:#?}", - t, layout); - } - - // Unsized types get turned into a fat pointer for LLVM. - return llsizingty; - } - - let r = layout.size(&cx.tcx().data_layout).bytes(); - let l = machine::llsize_of_alloc(cx, llsizingty); - if r != l { - bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", - r, l, t, layout); - } - - let r = layout.align(&cx.tcx().data_layout).abi(); - let l = machine::llalign_of_min(cx, llsizingty) as u64; - if r != l { - bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", - r, l, t, layout); - } - - llsizingty -} - pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { match ty.sty { ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | @@ -147,7 +32,7 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> } } -fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { +pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { let unsized_part = ccx.tcx().struct_tail(ty); match unsized_part.sty { ty::TyStr | ty::TyArray(..) 
| ty::TySlice(_) => { @@ -196,7 +81,6 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. /// For the LLVM type of a value as a whole, see `type_of`. -/// NB: If you update this, be sure to update `sizing_type_of()` as well. pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { // Check the cache. if let Some(&llty) = cx.lltypes().borrow().get(&t) { @@ -322,10 +206,14 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> llty } -pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) - -> machine::llalign { - let layout = cx.layout_of(t); - layout.align(&cx.tcx().data_layout).abi() as machine::llalign +impl<'a, 'tcx> CrateContext<'a, 'tcx> { + pub fn align_of(&self, ty: Ty<'tcx>) -> machine::llalign { + self.layout_of(ty).align(self).abi() as machine::llalign + } + + pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize { + self.layout_of(ty).size(self).bytes() as machine::llsize + } } fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String { diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 76313b158ab11..bc84ac49da985 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -121,13 +121,13 @@ pub fn unsafe_slice(_: &[UnsafeInner]) { fn str(_: &[u8]) { } -// CHECK: @trait_borrow(i8* nonnull, void (i8*)** noalias nonnull readonly) +// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly) // FIXME #25759 This should also have `nocapture` #[no_mangle] fn trait_borrow(_: &Drop) { } -// CHECK: @trait_box(i8* noalias nonnull, void (i8*)** noalias nonnull readonly) +// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly) #[no_mangle] fn trait_box(_: Box<Drop>) { }
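
The recurring mechanical change throughout this diff is that layout queries now take a generic context: anything implementing `HasDataLayout` (and, in trans, the `LayoutTyper` wrappers) can be passed where a bare `&TargetDataLayout` used to go, which is why call sites shrink from `layout.size(&scx.tcx().data_layout)` to `layout.size(scx)`. Below is a minimal, self-contained sketch of that dispatch pattern; the type names are simplified stand-ins for illustration, not rustc's actual definitions.

```rust
// Standalone sketch of the context-generic layout-query pattern used in
// this PR. All names here are illustrative stand-ins, not rustc's types.

#[derive(Copy, Clone)]
pub struct TargetDataLayout {
    pub pointer_size_bits: u64,
}

pub trait HasDataLayout: Copy {
    fn data_layout(&self) -> &TargetDataLayout;
}

// A bare reference to the data layout is itself a valid context...
impl<'a> HasDataLayout for &'a TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// ...and so is any richer compilation context that owns one.
#[derive(Copy, Clone)]
pub struct CrateContext<'a> {
    pub dl: &'a TargetDataLayout,
}

impl<'a> HasDataLayout for CrateContext<'a> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.dl
    }
}

#[derive(Copy, Clone)]
pub enum Primitive {
    Int(u64), // bit width
    Pointer,
}

impl Primitive {
    // Callers pass whatever context they have; the method extracts the
    // data layout itself, so call sites shrink from
    // `p.size(&cx.tcx().data_layout)` to `p.size(cx)`.
    pub fn size_bits<C: HasDataLayout>(self, cx: C) -> u64 {
        let dl = cx.data_layout();
        match self {
            Primitive::Int(bits) => bits,
            Primitive::Pointer => dl.pointer_size_bits,
        }
    }
}

fn main() {
    let dl = TargetDataLayout { pointer_size_bits: 64 };
    let ccx = CrateContext { dl: &dl };
    // Both context flavors work with the same query method.
    assert_eq!(Primitive::Pointer.size_bits(&dl), 64);
    assert_eq!(Primitive::Pointer.size_bits(ccx), 64);
    println!("pointer: {} bits", Primitive::Pointer.size_bits(ccx));
}
```

The `size_of`/`align_of` convenience methods added on `CrateContext` in type_of.rs are the same idea one level up: because the context can answer data-layout questions itself, size and alignment become one-call queries on the context instead of free functions threading the data layout by hand.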