From f45a474bd6c7ccfe35e7be5f341e3d04aa5d178e Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Tue, 6 Feb 2018 17:11:27 +0000 Subject: [PATCH 1/4] rustc_trans: add abi::CastTarget::ChunkedPrefix --- src/librustc_trans/abi.rs | 40 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 12698964d2e..60f3105170b 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -407,7 +407,8 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum CastTarget { Uniform(Uniform), - Pair(Reg, Reg) + Pair(Reg, Reg), + ChunkedPrefix { prefix: [RegKind; 8], chunk: Size, total: Size } } impl From for CastTarget { @@ -429,7 +430,8 @@ impl CastTarget { CastTarget::Pair(a, b) => { (a.size.abi_align(a.align(cx)) + b.size) .abi_align(self.align(cx)) - } + }, + CastTarget::ChunkedPrefix { total, .. } => total } } @@ -440,6 +442,12 @@ impl CastTarget { cx.data_layout().aggregate_align .max(a.align(cx)) .max(b.align(cx)) + }, + CastTarget::ChunkedPrefix { chunk, .. } => { + cx.data_layout().aggregate_align + .max(Reg { kind: RegKind::Integer, size: chunk }.align(cx)) + .max(Reg { kind: RegKind::Float, size: chunk }.align(cx)) + .max(Reg { kind: RegKind::Vector, size: chunk }.align(cx)) } } } @@ -452,6 +460,34 @@ impl CastTarget { a.llvm_type(cx), b.llvm_type(cx) ], false) + }, + CastTarget::ChunkedPrefix { prefix, chunk, total } => { + let total_chunks = total.bytes() / chunk.bytes(); + let rem_bytes = total.bytes() % chunk.bytes(); + let prefix_chunks = total_chunks.min(prefix.len() as u64); + + let int_ll_type = Reg { kind: RegKind::Integer, size: chunk }.llvm_type(cx); + + // Simple cases simplify to an array + if rem_bytes == 0 && prefix.into_iter().all(|&kind| kind == RegKind::Integer) { + return Type::array(&int_ll_type, total_chunks); + } + + // The final structure is made up of: + // Up to 8 chunks of the type specified in the prefix + // Any other complete chunks as integers + // One final integer needed to make up the total structure size + let mut args: Vec<_> = + prefix.into_iter().take(prefix_chunks as usize) + .map(|&kind| Reg { kind: kind, size: chunk }.llvm_type(cx)) + .chain((0..total_chunks - prefix_chunks).map(|_| int_ll_type)) + .collect(); + + if rem_bytes > 0 { + args.push(Type::ix(cx, rem_bytes * 8)); + } + + Type::struct_(cx, &args, false) } } } From 68042ba0d35f16d66dadf62334ca6bbf20d97268 Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Thu, 8 Feb 2018 11:01:34 +0000 Subject: [PATCH 2/4] rustc_trans: rewrite mips64 abi --- src/librustc_trans/cabi_mips64.rs | 158 +++++++++++++++++++++++++----- 1 file changed, 131 insertions(+), 27 deletions(-) diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs index e44063faab8..ad35dbeadfc 100644 --- a/src/librustc_trans/cabi_mips64.rs +++ b/src/librustc_trans/cabi_mips64.rs @@ -8,50 +8,154 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgAttribute, ArgType, CastTarget, FnType, LayoutExt, PassMode, Reg, RegKind, Uniform}; use context::CodegenCx; +use rustc::ty::layout::{self, Size}; -use rustc::ty::layout::Size; +fn extend_integer_width_mips(arg: &mut ArgType, bits: u64) { + // Always sign extend u32 values on 64-bit mips + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if let layout::Int(i, signed) = scalar.value { + if !signed && i.size().bits() == 32 { + if let PassMode::Direct(ref mut attrs) = arg.mode { + attrs.set(ArgAttribute::SExt); + return; + } + } + } + } -fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - ret: &mut ArgType<'tcx>, - offset: &mut Size) { - if !ret.layout.is_aggregate() { - ret.extend_integer_width_to(64); + arg.extend_integer_width_to(bits); +} + +fn bits_to_int_reg(bits: u64) -> Reg { + if bits <= 8 { + Reg::i8() + } else if bits <= 16 { + Reg::i16() + } else if bits <= 32 { + Reg::i32() } else { - ret.make_indirect(); - *offset += cx.tcx.data_layout.pointer_size; + Reg::i64() } } -fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) { - let dl = &cx.tcx.data_layout; - let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); +fn float_reg<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &ArgType<'tcx>, i: usize) -> Option { + match ret.layout.field(cx, i).abi { + layout::Abi::Scalar(ref scalar) => match scalar.value { + layout::F32 => Some(Reg::f32()), + layout::F64 => Some(Reg::f64()), + _ => None + }, + _ => None + } +} - if arg.layout.is_aggregate() { - arg.cast_to(Uniform { - unit: Reg::i64(), - total: size - }); - if !offset.is_abi_aligned(align) { - arg.pad_with(Reg::i64()); - } - } else { - arg.extend_integer_width_to(64); +fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) { + if !ret.layout.is_aggregate() { + extend_integer_width_mips(ret, 64); + return; } - *offset = offset.abi_align(align) + size.abi_align(align); + let size = ret.layout.size; + let bits = size.bits(); + if bits <= 128 { + // Unlike other architectures which return aggregates in registers, MIPS n64 limits the + // use of float registers to structures (not unions) containing exactly one or two + // float fields. + + if let layout::FieldPlacement::Arbitrary { .. } = ret.layout.fields { + if ret.layout.fields.count() == 1 { + if let Some(reg) = float_reg(cx, ret, 0) { + ret.cast_to(reg); + return; + } + } else if ret.layout.fields.count() == 2 { + if let Some(reg0) = float_reg(cx, ret, 0) { + if let Some(reg1) = float_reg(cx, ret, 1) { + ret.cast_to(CastTarget::Pair(reg0, reg1)); + return; + } + } + } + } + + // Cast to a uniform int structure + ret.cast_to(Uniform { + unit: bits_to_int_reg(bits), + total: size + }); + } else { + ret.make_indirect(); + } +} + +fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) { + if !arg.layout.is_aggregate() { + extend_integer_width_mips(arg, 64); + return; + } + + let dl = &cx.tcx.data_layout; + let size = arg.layout.size; + let mut prefix = [RegKind::Integer; 8]; + let mut prefix_index = 0; + + match arg.layout.fields { + layout::FieldPlacement::Array { .. } => { + // Arrays are passed indirectly + arg.make_indirect(); + return; + } + layout::FieldPlacement::Union(_) => { + // Unions and are always treated as a series of 64-bit integer chunks + }, + layout::FieldPlacement::Arbitrary { .. 
} => { + // Structures are split up into a series of 64-bit integer chunks, but any aligned + // doubles not part of another aggregate are passed as floats. + let mut last_offset = Size::from_bytes(0); + + for i in 0..arg.layout.fields.count() { + let field = arg.layout.field(cx, i); + let offset = arg.layout.fields.offset(i); + + // We only care about aligned doubles + if let layout::Abi::Scalar(ref scalar) = field.abi { + if let layout::F64 = scalar.value { + if offset.is_abi_aligned(dl.f64_align) { + // Skip over enough integers to cover [last_offset, offset) + assert!(last_offset.is_abi_aligned(dl.f64_align)); + prefix_index += ((offset - last_offset).bits() / 64) as usize; + + if prefix_index >= prefix.len() { + break; + } + + prefix[prefix_index] = RegKind::Float; + prefix_index += 1; + last_offset = offset + Reg::f64().size; + } + } + } + } + } + }; + + // Extract first 8 chunks as the prefix + arg.cast_to(CastTarget::ChunkedPrefix { + prefix: prefix, + chunk: Size::from_bytes(8), + total: size + }); } pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) { - let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(cx, &mut fty.ret, &mut offset); + classify_ret_ty(cx, &mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(cx, arg, &mut offset); + classify_arg_ty(cx, arg); } } From 05d66dc7a4ff053b5cbfa5ddafa890af291f4fc2 Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Wed, 14 Feb 2018 12:47:38 +0000 Subject: [PATCH 3/4] rustc_trans: add chunked prefix fields to CastTarget --- src/librustc_trans/abi.rs | 138 +++++++++++------------------- src/librustc_trans/cabi_x86_64.rs | 2 +- 2 files changed, 53 insertions(+), 87 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 60f3105170b..ee0f2415bd8 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -40,7 +40,7 @@ use rustc::ty::layout::{self, Align, Size, TyLayout}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; use libc::c_uint; -use std::{cmp, iter}; +use std::cmp; pub use syntax::abi::Abi; pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; @@ -279,30 +279,6 @@ impl Uniform { pub fn align(&self, cx: &CodegenCx) -> Align { self.unit.align(cx) } - - pub fn llvm_type(&self, cx: &CodegenCx) -> Type { - let llunit = self.unit.llvm_type(cx); - - if self.total <= self.unit.size { - return llunit; - } - - let count = self.total.bytes() / self.unit.size.bytes(); - let rem_bytes = self.total.bytes() % self.unit.size.bytes(); - - if rem_bytes == 0 { - return Type::array(&llunit, count); - } - - // Only integers can be really split further. 
- assert_eq!(self.unit.kind, RegKind::Integer); - - let args: Vec<_> = (0..count).map(|_| llunit) - .chain(iter::once(Type::ix(cx, rem_bytes * 8))) - .collect(); - - Type::struct_(cx, &args, false) - } } pub trait LayoutExt<'tcx> { @@ -405,91 +381,81 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } #[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum CastTarget { - Uniform(Uniform), - Pair(Reg, Reg), - ChunkedPrefix { prefix: [RegKind; 8], chunk: Size, total: Size } +pub struct CastTarget { + pub prefix: [Option; 8], + pub prefix_chunk: Size, + pub rest: Uniform, } impl From for CastTarget { fn from(unit: Reg) -> CastTarget { - CastTarget::Uniform(Uniform::from(unit)) + CastTarget::from(Uniform::from(unit)) } } impl From for CastTarget { fn from(uniform: Uniform) -> CastTarget { - CastTarget::Uniform(uniform) + CastTarget { + prefix: [None; 8], + prefix_chunk: Size::from_bytes(0), + rest: uniform + } } } impl CastTarget { - pub fn size(&self, cx: &CodegenCx) -> Size { - match *self { - CastTarget::Uniform(u) => u.total, - CastTarget::Pair(a, b) => { - (a.size.abi_align(a.align(cx)) + b.size) - .abi_align(self.align(cx)) - }, - CastTarget::ChunkedPrefix { total, .. } => total + pub fn pair(a: Reg, b: Reg) -> CastTarget { + CastTarget { + prefix: [Some(a.kind), None, None, None, None, None, None, None], + prefix_chunk: a.size, + rest: Uniform::from(b) } } + pub fn size(&self, cx: &CodegenCx) -> Size { + (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64) + .abi_align(self.rest.align(cx)) + self.rest.total + } + pub fn align(&self, cx: &CodegenCx) -> Align { - match *self { - CastTarget::Uniform(u) => u.align(cx), - CastTarget::Pair(a, b) => { - cx.data_layout().aggregate_align - .max(a.align(cx)) - .max(b.align(cx)) - }, - CastTarget::ChunkedPrefix { chunk, .. 
} => { - cx.data_layout().aggregate_align - .max(Reg { kind: RegKind::Integer, size: chunk }.align(cx)) - .max(Reg { kind: RegKind::Float, size: chunk }.align(cx)) - .max(Reg { kind: RegKind::Vector, size: chunk }.align(cx)) - } - } + self.prefix.iter() + .filter_map(|x| x.map(|kind| Reg { kind: kind, size: self.prefix_chunk }.align(cx))) + .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), + |acc, align| acc.max(align)) } pub fn llvm_type(&self, cx: &CodegenCx) -> Type { - match *self { - CastTarget::Uniform(u) => u.llvm_type(cx), - CastTarget::Pair(a, b) => { - Type::struct_(cx, &[ - a.llvm_type(cx), - b.llvm_type(cx) - ], false) - }, - CastTarget::ChunkedPrefix { prefix, chunk, total } => { - let total_chunks = total.bytes() / chunk.bytes(); - let rem_bytes = total.bytes() % chunk.bytes(); - let prefix_chunks = total_chunks.min(prefix.len() as u64); + let rest_ll_unit = self.rest.unit.llvm_type(cx); + let rest_count = self.rest.total.bytes() / self.rest.unit.size.bytes(); + let rem_bytes = self.rest.total.bytes() % self.rest.unit.size.bytes(); - let int_ll_type = Reg { kind: RegKind::Integer, size: chunk }.llvm_type(cx); + if self.prefix.iter().all(|x| x.is_none()) { + // Simplify to a single unit when there is no prefix and size <= unit size + if self.rest.total <= self.rest.unit.size { + return rest_ll_unit; + } - // Simple cases simplify to an array - if rem_bytes == 0 && prefix.into_iter().all(|&kind| kind == RegKind::Integer) { - return Type::array(&int_ll_type, total_chunks); - } - - // The final structure is made up of: - // Up to 8 chunks of the type specified in the prefix - // Any other complete chunks as integers - // One final integer needed to make up the total structure size - let mut args: Vec<_> = - prefix.into_iter().take(prefix_chunks as usize) - .map(|&kind| Reg { kind: kind, size: chunk }.llvm_type(cx)) - .chain((0..total_chunks - prefix_chunks).map(|_| int_ll_type)) - .collect(); - - if rem_bytes > 0 { - args.push(Type::ix(cx, rem_bytes * 8)); - } - - Type::struct_(cx, &args, false) + // Simplify to array when all chunks are the same size and type + if rem_bytes == 0 { + return Type::array(&rest_ll_unit, rest_count); } } + + // Create list of fields in the main structure + let mut args: Vec<_> = + self.prefix.iter().flat_map(|option_kind| option_kind.map( + |kind| Reg { kind: kind, size: self.prefix_chunk }.llvm_type(cx))) + .chain((0..rest_count).map(|_| rest_ll_unit)) + .collect(); + + // Append final integer + if rem_bytes != 0 { + // Only integers can be really split further. 
+ assert_eq!(self.rest.unit.kind, RegKind::Integer); + args.push(Type::ix(cx, rem_bytes * 8)); + } + + Type::struct_(cx, &args, false) } } diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index b8144a3ca7a..7eadaa7f493 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -171,7 +171,7 @@ fn cast_target(cls: &[Option], size: Size) -> CastTarget { let mut target = CastTarget::from(lo); if size > offset { if let Some(hi) = reg_component(cls, &mut i, size - offset) { - target = CastTarget::Pair(lo, hi); + target = CastTarget::pair(lo, hi); } } assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None); From 47c33f7bd0535fe6e47e38700ac1c8bf33e3f0d5 Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Wed, 14 Feb 2018 12:48:04 +0000 Subject: [PATCH 4/4] rustc_trans: adjust mips64 abi to use new CastTarget --- src/librustc_trans/cabi_mips64.rs | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs index ad35dbeadfc..94bf53cee1e 100644 --- a/src/librustc_trans/cabi_mips64.rs +++ b/src/librustc_trans/cabi_mips64.rs @@ -73,7 +73,7 @@ fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) } else if ret.layout.fields.count() == 2 { if let Some(reg0) = float_reg(cx, ret, 0) { if let Some(reg1) = float_reg(cx, ret, 1) { - ret.cast_to(CastTarget::Pair(reg0, reg1)); + ret.cast_to(CastTarget::pair(reg0, reg1)); return; } } @@ -98,7 +98,7 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) let dl = &cx.tcx.data_layout; let size = arg.layout.size; - let mut prefix = [RegKind::Integer; 8]; + let mut prefix = [None; 8]; let mut prefix_index = 0; match arg.layout.fields { @@ -123,15 +123,20 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) if let layout::Abi::Scalar(ref scalar) = field.abi { if let layout::F64 = scalar.value { if offset.is_abi_aligned(dl.f64_align) { - // Skip over enough integers to cover [last_offset, offset) + // Insert enough integers to cover [last_offset, offset) assert!(last_offset.is_abi_aligned(dl.f64_align)); - prefix_index += ((offset - last_offset).bits() / 64) as usize; + for _ in 0..((offset - last_offset).bits() / 64) + .min((prefix.len() - prefix_index) as u64) { - if prefix_index >= prefix.len() { + prefix[prefix_index] = Some(RegKind::Integer); + prefix_index += 1; + } + + if prefix_index == prefix.len() { break; } - prefix[prefix_index] = RegKind::Float; + prefix[prefix_index] = Some(RegKind::Float); prefix_index += 1; last_offset = offset + Reg::f64().size; } @@ -142,10 +147,11 @@ fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) }; // Extract first 8 chunks as the prefix - arg.cast_to(CastTarget::ChunkedPrefix { + let rest_size = size - Size::from_bytes(8) * prefix_index as u64; + arg.cast_to(CastTarget { prefix: prefix, - chunk: Size::from_bytes(8), - total: size + prefix_chunk: Size::from_bytes(8), + rest: Uniform { unit: Reg::i64(), total: rest_size } }); }
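A note on the final shape (not part of the patches themselves): the reworked CastTarget { prefix, prefix_chunk, rest } describes an argument as up to eight explicitly classified chunks followed by a uniform tail, which is what lets the old Pair case and the MIPS n64 "aligned doubles go in float registers" case share one representation. Below is a minimal standalone sketch of the n64 classification rule that classify_arg_ty implements above; RegKind, Field and classify here are simplified stand-ins for illustration only, not the rustc_trans types or API, and every chunk is assumed to be 8 bytes.

// Standalone model, not rustc_trans code: only naturally aligned f64 fields
// are promoted to float chunks, everything else is covered by integer chunks.

#[derive(Clone, Copy, Debug)]
enum RegKind { Integer, Float }

// One field of the aggregate being classified: its byte offset and whether
// it is an f64 (the only case the n64 rules single out).
struct Field { offset: u64, is_f64: bool }

// Mirror of the classify_arg_ty loop above: build at most 8 prefix chunks,
// then report how many bytes are left for the uniform i64 "rest".
fn classify(fields: &[Field], total_size: u64) -> ([Option<RegKind>; 8], u64) {
    let mut prefix = [None; 8];
    let mut index = 0;
    let mut last_offset = 0;

    for f in fields.iter().filter(|f| f.is_f64 && f.offset % 8 == 0) {
        // Cover the gap [last_offset, offset) with integer chunks first.
        let gap_chunks = (f.offset - last_offset) / 8;
        for _ in 0..gap_chunks.min((prefix.len() - index) as u64) {
            prefix[index] = Some(RegKind::Integer);
            index += 1;
        }
        if index == prefix.len() { break; }
        prefix[index] = Some(RegKind::Float);
        index += 1;
        last_offset = f.offset + 8;
    }

    // Whatever the prefix does not cover is passed as the uniform i64 rest.
    (prefix, total_size - 8 * index as u64)
}

fn main() {
    // struct { f64, u32, u32, f64 }: 24 bytes, doubles at offsets 0 and 16.
    let fields = [
        Field { offset: 0, is_f64: true },
        Field { offset: 8, is_f64: false },
        Field { offset: 12, is_f64: false },
        Field { offset: 16, is_f64: true },
    ];
    let (prefix, rest) = classify(&fields, 24);
    let used = prefix.iter().take_while(|x| x.is_some()).count();
    println!("prefix: {:?}, rest: {} bytes of i64 chunks", &prefix[..used], rest);
}

For that example the sketch prints a prefix of [Some(Float), Some(Integer), Some(Float)] with a zero-byte rest, i.e. the { double, i64, double } LLVM type that the new CastTarget::llvm_type would build for it.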