From d3b9ece4c0028ff7e36e34df2d2b26366f9c0648 Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sun, 16 Aug 2020 19:24:59 -0400
Subject: [PATCH 1/6] add tests related to tuple scalar layout with ZST in last field

---
 src/test/codegen/tuple-layout-opt.rs          | 36 +++++++++++++++++++
 .../dst-tuple-no-reorder.rs                   | 26 ++++++++++++++
 .../mir/mir_const_prop_tuple_field_reorder.rs | 27 ++++++++++++++
 3 files changed, 89 insertions(+)
 create mode 100644 src/test/codegen/tuple-layout-opt.rs
 create mode 100644 src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
 create mode 100644 src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs

diff --git a/src/test/codegen/tuple-layout-opt.rs b/src/test/codegen/tuple-layout-opt.rs
new file mode 100644
index 00000000000..e86c75f3f48
--- /dev/null
+++ b/src/test/codegen/tuple-layout-opt.rs
@@ -0,0 +1,36 @@
+// ignore-emscripten
+// compile-flags: -C no-prepopulate-passes
+
+// Test that tuples get optimized layout, in particular with a ZST in the last field (#63244)
+
+#![crate_type="lib"]
+
+type ScalarZstLast = (u128, ());
+// CHECK: define i128 @test_ScalarZstLast(i128 %_1)
+#[no_mangle]
+pub fn test_ScalarZstLast(_: ScalarZstLast) -> ScalarZstLast { loop {} }
+
+type ScalarZstFirst = ((), u128);
+// CHECK: define i128 @test_ScalarZstFirst(i128 %_1)
+#[no_mangle]
+pub fn test_ScalarZstFirst(_: ScalarZstFirst) -> ScalarZstFirst { loop {} }
+
+type ScalarPairZstLast = (u8, u128, ());
+// CHECK: define { i128, i8 } @test_ScalarPairZstLast(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairZstLast(_: ScalarPairZstLast) -> ScalarPairZstLast { loop {} }
+
+type ScalarPairZstFirst = ((), u8, u128);
+// CHECK: define { i8, i128 } @test_ScalarPairZstFirst(i8 %_1.0, i128 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairZstFirst(_: ScalarPairZstFirst) -> ScalarPairZstFirst { loop {} }
+
+type ScalarPairLotsOfZsts = ((), u8, (), u128, ());
+// CHECK: define { i128, i8 } @test_ScalarPairLotsOfZsts(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairLotsOfZsts(_: ScalarPairLotsOfZsts) -> ScalarPairLotsOfZsts { loop {} }
+
+type ScalarPairLottaNesting = (((), ((), u8, (), u128, ())), ());
+// CHECK: define { i128, i8 } @test_ScalarPairLottaNesting(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairLottaNesting(_: ScalarPairLottaNesting) -> ScalarPairLottaNesting { loop {} }
diff --git a/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs b/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
new file mode 100644
index 00000000000..26b923f431f
--- /dev/null
+++ b/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
@@ -0,0 +1,26 @@
+// run-pass
+
+#![feature(unsized_tuple_coercion)]
+
+// Ensure that unsizable fields that might be accessed don't get reordered
+
+fn nonzero_size() {
+    let sized: (u8, [u32; 2]) = (123, [456, 789]);
+    let unsize: &(u8, [u32]) = &sized;
+    assert_eq!(unsize.0, 123);
+    assert_eq!(unsize.1.len(), 2);
+    assert_eq!(unsize.1[0], 456);
+    assert_eq!(unsize.1[1], 789);
+}
+
+fn zst() {
+    let sized: (u8, [u32; 0]) = (123, []);
+    let unsize: &(u8, [u32]) = &sized;
+    assert_eq!(unsize.0, 123);
+    assert_eq!(unsize.1.len(), 0);
+}
+
+pub fn main() {
+    nonzero_size();
+    zst();
+}
diff --git a/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs b/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs
new file mode 100644
index 00000000000..629b50dec65
--- /dev/null
+++ b/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs
@@ -0,0 +1,27 @@
+// compile-flags: -Z mir-opt-level=2
+// build-pass
+#![crate_type="lib"]
+
+// This used to ICE: const-prop did not account for field reordering of scalar pairs,
+// and would generate a tuple like `(0x1337, VariantBar): (FooEnum, isize)`,
+// causing assertion failures in codegen when trying to read 0x1337 at the wrong type.
+
+pub enum FooEnum {
+    VariantBar,
+    VariantBaz,
+    VariantBuz,
+}
+
+pub fn wrong_index() -> isize {
+    let (_, b) = id((FooEnum::VariantBar, 0x1337));
+    b
+}
+
+pub fn wrong_index_two() -> isize {
+    let (_, (_, b)) = id(((), (FooEnum::VariantBar, 0x1338)));
+    b
+}
+
+fn id<T>(x: T) -> T {
+    x
+}

From e5d85f917b8965a5e62513c17cbb887366b152bc Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sun, 16 Aug 2020 19:25:24 -0400
Subject: [PATCH 2/6] allow reordering of the last field of a MaybeUnsized struct if it's a ZST

---
 compiler/rustc_middle/src/ty/layout.rs | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 08bd131565b..4038deb3233 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -289,25 +289,32 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {

         let optimize = !repr.inhibit_struct_field_reordering_opt();
         if optimize {
-            let end =
-                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-            let optimizing = &mut inverse_memory_index[..end];
             let field_align = |f: &TyAndLayout<'_>| {
                 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
             };
             match kind {
-                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
-                    optimizing.sort_by_key(|&x| {
+                StructKind::AlwaysSized => {
+                    inverse_memory_index.sort_by_key(|&x| {
                         // Place ZSTs first to avoid "interesting offsets",
                         // especially with only one or two non-ZST fields.
                         let f = &fields[x as usize];
                         (!f.is_zst(), cmp::Reverse(field_align(f)))
                     });
                 }
+                StructKind::MaybeUnsized => {
+                    // Sort in descending alignment, except for the last field,
+                    // which may be accessed through an unsized type.
+                    inverse_memory_index[..fields.len() - 1]
+                        .sort_by_key(|&x| cmp::Reverse(field_align(&fields[x as usize])));
+                    // Place ZSTs first to avoid "interesting offsets".
+                    // This will reorder the last field if it is a ZST, which is okay because
+                    // there's nothing in memory that could be accessed through an unsized type.
+                    inverse_memory_index.sort_by_key(|&x| !fields[x as usize].is_zst());
+                }
                 StructKind::Prefixed(..) => {
                     // Sort in ascending alignment so that the layout stay optimal
                     // regardless of the prefix
-                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
+                    inverse_memory_index.sort_by_key(|&x| field_align(&fields[x as usize]));
                 }
             }
         }

From e9bc3ddb073f2261ac46832d985efe8db863ed6a Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sun, 16 Aug 2020 20:38:57 -0400
Subject: [PATCH 3/6] test that we do not change the offset of ZST tuple fields when unsizing

---
 .../dst-tuple-zst-offsets.rs | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs

diff --git a/src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs b/src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs
new file mode 100644
index 00000000000..b0cefe77039
--- /dev/null
+++ b/src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs
@@ -0,0 +1,22 @@
+// run-pass
+
+#![feature(unsized_tuple_coercion)]
+
+// Check that we do not change the offsets of ZST fields when unsizing
+
+fn scalar_layout() {
+    let sized: &(u8, [(); 13]) = &(123, [(); 13]);
+    let unsize: &(u8, [()]) = sized;
+    assert_eq!(sized.1.as_ptr(), unsize.1.as_ptr());
+}
+
+fn scalarpair_layout() {
+    let sized: &(u8, u16, [(); 13]) = &(123, 456, [(); 13]);
+    let unsize: &(u8, u16, [()]) = sized;
+    assert_eq!(sized.2.as_ptr(), unsize.2.as_ptr());
+}
+
+pub fn main() {
+    scalar_layout();
+    scalarpair_layout();
+}

From 68217c9e0f1269d29b4c1c72d08fb1b95bf441cd Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sun, 16 Aug 2020 19:25:39 -0400
Subject: [PATCH 4/6] ignore zst offsets instead

---
 compiler/rustc_codegen_ssa/src/mir/place.rs |  32 +++--
 compiler/rustc_middle/src/ty/layout.rs      | 135 ++++++++------------
 2 files changed, 78 insertions(+), 89 deletions(-)

diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 05656774f0e..7c0eddea522 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -93,15 +93,29 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         let effective_field_align = self.align.restrict_for_offset(offset);

         let mut simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
-                bx.struct_gep(self.llval, 1)
-            } else {
-                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
+            let llval = match self.layout.abi {
+                _ if offset.bytes() == 0 => {
+                    // Unions and newtypes only use an offset of 0.
+                    // Also handles the first field of Scalar and ScalarPair layouts.
+                    self.llval
+                }
+                Abi::ScalarPair(ref a, ref b)
+                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
+                {
+                    // Offset matches second field.
+                    bx.struct_gep(self.llval, 1)
+                }
+                Abi::ScalarPair(..) | Abi::Scalar(_) => {
+                    // ZST fields are not included in Scalar and ScalarPair layouts, so manually offset the pointer.
+                    assert!(
+                        field.is_zst(),
+                        "non-ZST field offset does not match layout: {:?}",
+                        field
+                    );
+                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                }
+                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
             };
             PlaceRef {
                 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 4038deb3233..3ec08423472 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -289,32 +289,25 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {

         let optimize = !repr.inhibit_struct_field_reordering_opt();
         if optimize {
+            let end =
+                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+            let optimizing = &mut inverse_memory_index[..end];
             let field_align = |f: &TyAndLayout<'_>| {
                 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
             };
             match kind {
-                StructKind::AlwaysSized => {
-                    inverse_memory_index.sort_by_key(|&x| {
+                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                    optimizing.sort_by_key(|&x| {
                         // Place ZSTs first to avoid "interesting offsets",
                         // especially with only one or two non-ZST fields.
                         let f = &fields[x as usize];
                         (!f.is_zst(), cmp::Reverse(field_align(f)))
                     });
                 }
-                StructKind::MaybeUnsized => {
-                    // Sort in descending alignment, except for the last field,
-                    // which may be accessed through an unsized type.
-                    inverse_memory_index[..fields.len() - 1]
-                        .sort_by_key(|&x| cmp::Reverse(field_align(&fields[x as usize])));
-                    // Place ZSTs first to avoid "interesting offsets".
-                    // This will reorder the last field if it is a ZST, which is okay because
-                    // there's nothing in memory that could be accessed through an unsized type.
-                    inverse_memory_index.sort_by_key(|&x| !fields[x as usize].is_zst());
-                }
                 StructKind::Prefixed(..) => {
                     // Sort in ascending alignment so that the layout stay optimal
                     // regardless of the prefix
-                    inverse_memory_index.sort_by_key(|&x| field_align(&fields[x as usize]));
+                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                 }
             }
         }
@@ -397,78 +390,60 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {

         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs, and we need them to all start at 0.
-            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
-            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
-                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+            // All other fields must be ZSTs.
+            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

-                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-                    // We have exactly one non-ZST field.
-                    (Some((i, field)), None, None) => {
-                        // Field fills the struct and it has a scalar or scalar pair ABI.
-                        if offsets[i].bytes() == 0
-                            && align.abi == field.align.abi
-                            && size == field.size
-                        {
-                            match field.abi {
-                                // For plain scalars, or vectors of them, we can't unpack
-                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                                    abi = field.abi.clone();
-                                }
-                                // But scalar pairs are Rust-specific and get
-                                // treated as aggregates by C ABIs anyway.
-                                Abi::ScalarPair(..) => {
-                                    abi = field.abi.clone();
-                                }
-                                _ => {}
+            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+                // We have exactly one non-ZST field.
+                (Some((i, field)), None, None) => {
+                    // Field fills the struct and it has a scalar or scalar pair ABI.
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+                    {
+                        match field.abi {
+                            // For plain scalars, or vectors of them, we can't unpack
+                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                                abi = field.abi.clone();
+                            }
+                            // But scalar pairs are Rust-specific and get
+                            // treated as aggregates by C ABIs anyway.
+                            Abi::ScalarPair(..) => {
+                                abi = field.abi.clone();
+                            }
+                            _ => {}
                         }
                     }
                 }
-
-                    // Two non-ZST fields, and they're both scalars.
-                    (
-                        Some((
-                            i,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
-                            },
-                        )),
-                        Some((
-                            j,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
-                            },
-                        )),
-                        None,
-                    ) => {
-                        // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
-                        let pair = self.scalar_pair(a.clone(), b.clone());
-                        let pair_offsets = match pair.fields {
-                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index, &[0, 1]);
-                                offsets
-                            }
-                            _ => bug!(),
-                        };
-                        if offsets[i] == pair_offsets[0]
-                            && offsets[j] == pair_offsets[1]
-                            && align == pair.align
-                            && size == pair.size
-                        {
-                            // We can use `ScalarPair` only when it matches our
-                            // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
-                        }
-                    }
-
-                    _ => {}
                 }
+
+                // Two non-ZST fields, and they're both scalars.
+                (
+                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
+                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
+                    None,
+                ) => {
+                    // Order by the memory placement, not source order.
+                    let ((i, a), (j, b)) =
+                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
+                    let pair = self.scalar_pair(a.clone(), b.clone());
+                    let pair_offsets = match pair.fields {
+                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                            assert_eq!(memory_index, &[0, 1]);
+                            offsets
+                        }
+                        _ => bug!(),
+                    };
+                    if offsets[i] == pair_offsets[0]
+                        && offsets[j] == pair_offsets[1]
+                        && align == pair.align
+                        && size == pair.size
+                    {
+                        // We can use `ScalarPair` only when it matches our
+                        // already computed layout (including `#[repr(C)]`).
+                        abi = pair.abi;
+                    }
+                }
+
+                _ => {}
             }
         }

From 6fc1d8bc13124bb134d7ab54e56237821a55912e Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sun, 16 Aug 2020 22:02:18 -0400
Subject: [PATCH 5/6] add codegen test

---
 src/test/codegen/zst-offset.rs | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 src/test/codegen/zst-offset.rs

diff --git a/src/test/codegen/zst-offset.rs b/src/test/codegen/zst-offset.rs
new file mode 100644
index 00000000000..3c63cddccad
--- /dev/null
+++ b/src/test/codegen/zst-offset.rs
@@ -0,0 +1,29 @@
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+
+// Hack to get the correct size for the length part in slices
+// CHECK: @helper([[USIZE:i[0-9]+]] %_1)
+#[no_mangle]
+pub fn helper(_: usize) {
+}
+
+// Check that we correctly generate a GEP for a ZST that is not included in Scalar layout
+// CHECK-LABEL: @scalar_layout
+#[no_mangle]
+pub fn scalar_layout(s: &(u64, ())) {
+// CHECK: [[X0:%[0-9]+]] = bitcast i64* %s to i8*
+// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 8
+    let x = &s.1;
+    &x; // keep variable in an alloca
+}
+
+// Check that we correctly generate a GEP for a ZST that is not included in ScalarPair layout
+// CHECK-LABEL: @scalarpair_layout
+#[no_mangle]
+pub fn scalarpair_layout(s: &(u64, u32, ())) {
+// CHECK: [[X0:%[0-9]+]] = bitcast { i64, i32 }* %s to i8*
+// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 12
+    let x = &s.2;
+    &x; // keep variable in an alloca
+}

From 24e0913e37cc6fc363b37d13bf519db212f175a2 Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Thu, 20 Aug 2020 18:59:52 -0400
Subject: [PATCH 6/6] handle vector layout

---
 compiler/rustc_codegen_ssa/src/mir/place.rs | 20 ++++++++++++--------
 src/test/codegen/zst-offset.rs              | 14 ++++++++++++++
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 7c0eddea522..799f3b3a575 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -96,7 +96,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             let llval = match self.layout.abi {
                 _ if offset.bytes() == 0 => {
                     // Unions and newtypes only use an offset of 0.
-                    // Also handles the first field of Scalar and ScalarPair layouts.
+                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                     self.llval
                 }
                 Abi::ScalarPair(ref a, ref b)
@@ -105,16 +105,20 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                     // Offset matches second field.
                     bx.struct_gep(self.llval, 1)
                 }
-                Abi::ScalarPair(..) | Abi::Scalar(_) => {
-                    // ZST fields are not included in Scalar and ScalarPair layouts, so manually offset the pointer.
-                    assert!(
-                        field.is_zst(),
-                        "non-ZST field offset does not match layout: {:?}",
-                        field
-                    );
+                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                     let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                     bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
                 }
+                Abi::Scalar(_) | Abi::ScalarPair(..) => {
+                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
+                    bug!(
+                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
+                        field,
+                        self.layout
+                    );
+                }
                 _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
             };
             PlaceRef {
diff --git a/src/test/codegen/zst-offset.rs b/src/test/codegen/zst-offset.rs
index 3c63cddccad..0c015fca325 100644
--- a/src/test/codegen/zst-offset.rs
+++ b/src/test/codegen/zst-offset.rs
@@ -1,6 +1,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(repr_simd)]
 
 // Hack to get the correct size for the length part in slices
 // CHECK: @helper([[USIZE:i[0-9]+]] %_1)
@@ -27,3 +28,16 @@ pub fn scalarpair_layout(s: &(u64, u32, ())) {
     let x = &s.2;
     &x; // keep variable in an alloca
 }
+
+#[repr(simd)]
+pub struct U64x4(u64, u64, u64, u64);
+
+// Check that we correctly generate a GEP for a ZST that is not included in Vector layout
+// CHECK-LABEL: @vector_layout
+#[no_mangle]
+pub fn vector_layout(s: &(U64x4, ())) {
+// CHECK: [[X0:%[0-9]+]] = bitcast <4 x i64>* %s to i8*
+// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 32
+    let x = &s.1;
+    &x; // keep variable in an alloca
+}