Remove drop flags from structs and enums implementing Drop.

Eduard Burtescu 2016-08-23 10:39:30 +03:00
parent d0654ae5e5
commit 119508cdb4
39 changed files with 305 additions and 935 deletions
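The observable effect of this change is that adding a Drop impl no longer grows a type: the hidden flag byte that used to be appended to structs and enums with destructors is gone, and drop obligations are tracked outside the value instead. The sketch below is illustrative only and not part of the diff; it mirrors the size_of test updated near the end of this commit, and the single-usize field layout of Cat and Kitty is an assumption made to match that test.

use std::mem;

struct Cat {
    x: usize,
}

struct Kitty {
    x: usize,
}

impl Drop for Kitty {
    fn drop(&mut self) {}
}

fn main() {
    // With drop flags removed, the Drop impl adds no hidden state,
    // so both types are exactly one usize in size on every target.
    assert_eq!(mem::size_of::<Cat>(), mem::size_of::<usize>());
    assert_eq!(mem::size_of::<Kitty>(), mem::size_of::<usize>());
}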

View File

@ -121,7 +121,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize;
/// }
/// ```
#[unsafe_no_drop_flag]
#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
ptr: Shared<ArcInner<T>>,
@ -147,7 +147,7 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
/// nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
/// as `Weak<T>` pointers.
#[unsafe_no_drop_flag]
#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<ArcInner<T>>,
@ -559,15 +559,6 @@ impl<T: ?Sized> Drop for Arc<T> {
#[unsafe_destructor_blind_to_params]
#[inline]
fn drop(&mut self) {
// This structure has #[unsafe_no_drop_flag], so this drop glue may run
// more than once (but it is guaranteed to be zeroed after the first if
// it's run more than once)
let thin = *self.ptr as *const ();
if thin as usize == mem::POST_DROP_USIZE {
return;
}
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
@ -755,12 +746,6 @@ impl<T: ?Sized> Drop for Weak<T> {
/// ```
fn drop(&mut self) {
let ptr = *self.ptr;
let thin = ptr as *const ();
// see comments above for why this check is here
if thin as usize == mem::POST_DROP_USIZE {
return;
}
// If we find out that we were the last weak pointer, then it's time to
// deallocate the data entirely. See the discussion in Arc::drop() about

View File

@ -88,7 +88,7 @@
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![feature(unsize)]
#![cfg_attr(not(test), feature(fused, raw, fn_traits, placement_new_protocol))]

View File

@ -44,7 +44,7 @@ use core::cmp;
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[unsafe_no_drop_flag]
#[cfg_attr(stage0, unsafe_no_drop_flag)]
pub struct RawVec<T> {
ptr: Unique<T>,
cap: usize,
@ -546,13 +546,6 @@ impl<T> RawVec<T> {
mem::forget(self);
output
}
/// This is a stupid name in the hopes that someone will find this in the
/// not too distant future and remove it with the rest of
/// #[unsafe_no_drop_flag]
pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
self.cap != mem::POST_DROP_USIZE
}
}
impl<T> Drop for RawVec<T> {
@ -560,7 +553,7 @@ impl<T> Drop for RawVec<T> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
fn drop(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
if elem_size != 0 && self.cap != 0 {
let align = mem::align_of::<T>();
let num_bytes = elem_size * self.cap;

View File

@ -182,7 +182,7 @@ struct RcBox<T: ?Sized> {
/// A reference-counted pointer type over an immutable value.
///
/// See the [module level documentation](./index.html) for more details.
#[unsafe_no_drop_flag]
#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
ptr: Shared<RcBox<T>>,
@ -466,21 +466,18 @@ impl<T: ?Sized> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
let ptr = *self.ptr;
let thin = ptr as *const ();
if thin as usize != mem::POST_DROP_USIZE {
self.dec_strong();
if self.strong() == 0 {
// destroy the contained object
ptr::drop_in_place(&mut (*ptr).value);
self.dec_strong();
if self.strong() == 0 {
// destroy the contained object
ptr::drop_in_place(&mut (*ptr).value);
// remove the implicit "strong weak" pointer now that we've
// destroyed the contents.
self.dec_weak();
// remove the implicit "strong weak" pointer now that we've
// destroyed the contents.
self.dec_weak();
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
}
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
}
}
}
@ -724,7 +721,7 @@ impl<T> From<T> for Rc<T> {
/// dropped.
///
/// See the [module level documentation](./index.html) for more.
#[unsafe_no_drop_flag]
#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<RcBox<T>>,
@ -825,15 +822,12 @@ impl<T: ?Sized> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
let ptr = *self.ptr;
let thin = ptr as *const ();
if thin as usize != mem::POST_DROP_USIZE {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all
// the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
}
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all
// the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
}
}
}

View File

@ -52,7 +52,7 @@
#![feature(step_by)]
#![feature(unicode)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag)]
#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![cfg_attr(test, feature(rand, test))]
#![no_std]

View File

@ -268,7 +268,7 @@ use super::range::RangeArgument;
/// Vec does not currently guarantee the order in which elements are dropped
/// (the order has changed in the past, and may change again).
///
#[unsafe_no_drop_flag]
#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Vec<T> {
buf: RawVec<T>,
@ -1600,11 +1600,9 @@ impl<T: Ord> Ord for Vec<T> {
impl<T> Drop for Vec<T> {
#[unsafe_destructor_blind_to_params]
fn drop(&mut self) {
if self.buf.unsafe_no_drop_flag_needs_drop() {
unsafe {
// use drop for [T]
ptr::drop_in_place(&mut self[..]);
}
unsafe {
// use drop for [T]
ptr::drop_in_place(&mut self[..]);
}
// RawVec handles deallocation
}

View File

@ -244,19 +244,6 @@ extern "rust-intrinsic" {
/// crate it is invoked in.
pub fn type_id<T: ?Sized + 'static>() -> u64;
/// Creates a value initialized so that its drop flag,
/// if any, says that it has been dropped.
///
/// `init_dropped` is unsafe because it returns a datum with all
/// of its bytes set to the drop flag, which generally does not
/// correspond to a valid value.
///
/// This intrinsic is likely to be deprecated in the future when
/// Rust moves to non-zeroing dynamic drop (and thus removes the
/// embedded drop flags that are being established by this
/// intrinsic).
pub fn init_dropped<T>() -> T;
/// Creates a value initialized to zero.
///
/// `init` is unsafe because it returns a zeroed-out datum,

View File

@ -241,27 +241,6 @@ pub unsafe fn zeroed<T>() -> T {
intrinsics::init()
}
/// Creates a value initialized to an unspecified series of bytes.
///
/// The byte sequence usually indicates that the value at the memory
/// in question has been dropped. Thus, *if* T carries a drop flag,
/// any associated destructor will not be run when the value falls out
/// of scope.
///
/// Some code at one time used the `zeroed` function above to
/// accomplish this goal.
///
/// This function is expected to be deprecated with the transition
/// to non-zeroing drop.
#[inline]
#[unstable(feature = "filling_drop", issue = "5016")]
pub unsafe fn dropped<T>() -> T {
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() }
dropped_impl()
}
/// Bypasses Rust's normal memory-initialization checks by pretending to
/// produce a value of type T, while doing nothing at all.
///
@ -518,56 +497,6 @@ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
macro_rules! repeat_u8_as_u16 {
($name:expr) => { (($name as u16) << 8 |
($name as u16)) }
}
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
($name as u32) << 8 |
($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
(repeat_u8_as_u32!($name) as u64)) }
}
// NOTE: Keep synchronized with values used in librustc_trans::trans::adt.
//
// In particular, the POST_DROP_U8 marker must never equal the
// DTOR_NEEDED_U8 marker.
//
// For a while pnkfelix was using 0xc1 here.
// But having the sign bit set is a pain, so 0x1d is probably better.
//
// And of course, 0x00 brings back the old world of zero'ing on drop.
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U8: u8 = 0x1d;
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U16: u16 = repeat_u8_as_u16!(POST_DROP_U8);
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
#[cfg(target_pointer_width = "16")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U16 as usize;
#[cfg(target_pointer_width = "32")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
#[cfg(target_pointer_width = "64")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;
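For reference, the repeat macros above just replicate the marker byte across the wider integer; with POST_DROP_U8 = 0x1d the constants being deleted work out as follows (simple arithmetic, written out here only for clarity):

// repeat_u8_as_u16!(0x1d) == 0x1d1d
// repeat_u8_as_u32!(0x1d) == 0x1d1d_1d1d
// repeat_u8_as_u64!(0x1d) == 0x1d1d_1d1d_1d1d_1d1d
// so POST_DROP_USIZE was the pointer-width all-0x1d fill pattern that
// dropped values were overwritten with under the old filling-drop scheme.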
/// Interprets `src` as `&U`, and then reads `src` without moving the contained
/// value.
///

View File

@ -140,21 +140,6 @@ pub unsafe fn read<T>(src: *const T) -> T {
tmp
}
#[allow(missing_docs)]
#[inline(always)]
#[unstable(feature = "filling_drop",
reason = "may play a larger role in std::ptr future extensions",
issue = "5016")]
pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
// Now mark `dest` as dropped:
write_bytes(dest, mem::POST_DROP_U8, 1);
tmp
}
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///

View File

@ -889,8 +889,6 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"adds unstable command line options to rustc interface"),
force_overflow_checks: Option<bool> = (None, parse_opt_bool, [TRACKED],
"force overflow checks on or off"),
force_dropflag_checks: Option<bool> = (None, parse_opt_bool, [TRACKED],
"force drop flag checks on or off"),
trace_macros: bool = (false, parse_bool, [UNTRACKED],
"for every macro invocation, print its name and arguments"),
enable_nonzeroing_move_hints: bool = (false, parse_bool, [TRACKED],
@ -2427,10 +2425,6 @@ mod tests {
opts.debugging_opts.force_overflow_checks = Some(true);
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
opts = reference.clone();
opts.debugging_opts.force_dropflag_checks = Some(true);
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
opts = reference.clone();
opts.debugging_opts.enable_nonzeroing_move_hints = true;
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());

View File

@ -891,17 +891,6 @@ impl<'a, 'gcx, 'tcx> Layout {
let mut st = Struct::new(dl, packed);
st.extend(dl, fields, ty)?;
// FIXME(16758) don't add a drop flag to unsized structs, as it
// won't actually be in the location we say it is because it'll be after
// the unsized field. Several other pieces of code assume that the unsized
// field is definitely the last one.
if def.dtor_kind().has_drop_flag() &&
ty.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
st.extend(dl, Some(Ok(&Scalar {
value: Int(I8),
non_zero: false
})).into_iter(), ty)?;
}
Univariant {
variant: st,
non_zero: Some(def.did) == tcx.lang_items.non_zero()
@ -911,24 +900,18 @@ impl<'a, 'gcx, 'tcx> Layout {
let hint = *tcx.lookup_repr_hints(def.did).get(0)
.unwrap_or(&attr::ReprAny);
let dtor = def.dtor_kind().has_drop_flag();
let drop_flag = if dtor {
Some(Scalar { value: Int(I8), non_zero: false })
} else {
None
};
if def.variants.is_empty() {
// Uninhabitable; represent as unit
// (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny);
let mut st = Struct::new(dl, false);
st.extend(dl, drop_flag.iter().map(Ok), ty)?;
return success(Univariant { variant: st, non_zero: false });
return success(Univariant {
variant: Struct::new(dl, false),
non_zero: false
});
}
if !dtor && def.variants.iter().all(|v| v.fields.is_empty()) {
if def.variants.iter().all(|v| v.fields.is_empty()) {
// All bodies empty -> intlike
let (mut min, mut max) = (i64::MAX, i64::MIN);
for v in &def.variants {
@ -964,7 +947,7 @@ impl<'a, 'gcx, 'tcx> Layout {
field.ty(tcx, substs).layout(infcx)
});
let mut st = Struct::new(dl, false);
st.extend(dl, fields.chain(drop_flag.iter().map(Ok)), ty)?;
st.extend(dl, fields, ty)?;
return success(Univariant { variant: st, non_zero: false });
}
@ -973,7 +956,7 @@ impl<'a, 'gcx, 'tcx> Layout {
v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
}).collect::<Vec<_>>();
if !dtor && variants.len() == 2 && hint == attr::ReprAny {
if variants.len() == 2 && hint == attr::ReprAny {
// Nullable pointer optimization
for discr in 0..2 {
let other_fields = variants[1 - discr].iter().map(|ty| {
@ -1045,8 +1028,7 @@ impl<'a, 'gcx, 'tcx> Layout {
Ok(field)
});
let mut st = Struct::new(dl, false);
st.extend(dl, discr.iter().map(Ok).chain(fields)
.chain(drop_flag.iter().map(Ok)), ty)?;
st.extend(dl, discr.iter().map(Ok).chain(fields), ty)?;
size = cmp::max(size, st.min_size());
align = align.max(st.align);
Ok(st)
@ -1277,11 +1259,6 @@ impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
return Err(err);
}
// If there's a drop flag, it can't be just a pointer.
if def.dtor_kind().has_drop_flag() {
return Err(err);
}
// Get a zero-sized variant or a pointer newtype.
let zero_or_ptr_variant = |i: usize| {
let fields = def.variants[i].fields.iter().map(|field| {

View File

@ -122,23 +122,16 @@ pub struct CrateAnalysis<'a> {
#[derive(Copy, Clone)]
pub enum DtorKind {
NoDtor,
TraitDtor(bool)
TraitDtor
}
impl DtorKind {
pub fn is_present(&self) -> bool {
match *self {
TraitDtor(..) => true,
TraitDtor => true,
_ => false
}
}
pub fn has_drop_flag(&self) -> bool {
match self {
&NoDtor => false,
&TraitDtor(flag) => flag
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@ -1440,7 +1433,6 @@ bitflags! {
const IS_PHANTOM_DATA = 1 << 3,
const IS_SIMD = 1 << 4,
const IS_FUNDAMENTAL = 1 << 5,
const IS_NO_DROP_FLAG = 1 << 6,
}
}
@ -1558,9 +1550,6 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> {
if attr::contains_name(&attrs, "fundamental") {
flags = flags | AdtFlags::IS_FUNDAMENTAL;
}
if attr::contains_name(&attrs, "unsafe_no_drop_flag") {
flags = flags | AdtFlags::IS_NO_DROP_FLAG;
}
if tcx.lookup_simd(did) {
flags = flags | AdtFlags::IS_SIMD;
}
@ -1627,10 +1616,7 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> {
/// Returns whether this type has a destructor.
pub fn has_dtor(&self) -> bool {
match self.dtor_kind() {
NoDtor => false,
TraitDtor(..) => true
}
self.dtor_kind().is_present()
}
/// Asserts this is a struct and returns the struct's unique
@ -1710,9 +1696,7 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> {
pub fn dtor_kind(&self) -> DtorKind {
match self.destructor.get() {
Some(_) => {
TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG))
}
Some(_) => TraitDtor,
None => NoDtor,
}
}

View File

@ -45,7 +45,7 @@ use std::collections::HashSet;
use syntax::{ast};
use syntax::attr::{self, AttrMetaMethods, AttributeMethods};
use syntax_pos::{self, Span};
use syntax_pos::Span;
use rustc::hir::{self, PatKind};
use rustc::hir::intravisit::FnKind;
@ -1154,56 +1154,3 @@ impl LateLintPass for UnstableFeatures {
}
}
}
/// Lints for attempts to impl Drop on types that have the `#[repr(C)]`
/// attribute (see issue #24585).
#[derive(Copy, Clone)]
pub struct DropWithReprExtern;
declare_lint! {
DROP_WITH_REPR_EXTERN,
Warn,
"use of #[repr(C)] on a type that implements Drop"
}
impl LintPass for DropWithReprExtern {
fn get_lints(&self) -> LintArray {
lint_array!(DROP_WITH_REPR_EXTERN)
}
}
impl LateLintPass for DropWithReprExtern {
fn check_crate(&mut self, ctx: &LateContext, _: &hir::Crate) {
let drop_trait = match ctx.tcx.lang_items.drop_trait() {
Some(id) => ctx.tcx.lookup_trait_def(id), None => { return }
};
drop_trait.for_each_impl(ctx.tcx, |drop_impl_did| {
if !drop_impl_did.is_local() {
return;
}
let dtor_self_type = ctx.tcx.lookup_item_type(drop_impl_did).ty;
match dtor_self_type.sty {
ty::TyEnum(self_type_def, _) |
ty::TyStruct(self_type_def, _) => {
let self_type_did = self_type_def.did;
let hints = ctx.tcx.lookup_repr_hints(self_type_did);
if hints.iter().any(|attr| *attr == attr::ReprExtern) &&
self_type_def.dtor_kind().has_drop_flag() {
let drop_impl_span = ctx.tcx.map.def_id_span(drop_impl_did,
syntax_pos::DUMMY_SP);
let self_defn_span = ctx.tcx.map.def_id_span(self_type_did,
syntax_pos::DUMMY_SP);
ctx.span_lint_note(DROP_WITH_REPR_EXTERN,
drop_impl_span,
"implementing Drop adds hidden state to types, \
possibly conflicting with `#[repr(C)]`",
self_defn_span,
"the `#[repr(C)]` attribute is attached here");
}
}
_ => {}
}
})
}
}

View File

@ -127,7 +127,6 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) {
UnconditionalRecursion,
InvalidNoMangleItems,
PluginAsLibrary,
DropWithReprExtern,
MutableTransmutes,
);
@ -218,4 +217,5 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) {
// This was renamed to raw_pointer_derive, which was then removed,
// so it is also considered removed
store.register_removed("raw_pointer_deriving", "using derive with raw pointers is ok");
store.register_removed("drop_with_repr_extern", "drop flags have been removed");
}

View File

@ -72,48 +72,16 @@ pub enum BranchKind {
type Hint = attr::ReprAttr;
// Representation of the context surrounding an unsized type. I want
// to be able to track the drop flags that are injected by trans.
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct TypeContext {
prefix: Type,
needs_drop_flag: bool,
}
impl TypeContext {
pub fn prefix(&self) -> Type { self.prefix }
pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag }
fn direct(t: Type) -> TypeContext {
TypeContext { prefix: t, needs_drop_flag: false }
}
fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext {
TypeContext { prefix: t, needs_drop_flag: needs_drop_flag }
}
}
/// Representations.
#[derive(Eq, PartialEq, Debug)]
pub enum Repr<'tcx> {
/// C-like enums; basically an int.
CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
/// Single-case variants, and structs/tuples/records.
///
/// Structs with destructors need a dynamic destroyedness flag to
/// avoid running the destructor too many times; this is included
/// in the `Struct` if present.
/// (The flag, if nonzero, represents the initialization value to use;
/// if zero, then use no flag at all.)
Univariant(Struct<'tcx>, u8),
Univariant(Struct<'tcx>),
/// General-case enums: for each case there is a struct, and they
/// all start with a field for the discriminant.
///
/// Types with destructors need a dynamic destroyedness flag to
/// avoid running the destructor too many times; the last argument
/// indicates whether such a flag is present.
/// (The flag, if nonzero, represents the initialization value to use;
/// if zero, then use no flag at all.)
General(IntType, Vec<Struct<'tcx>>, u8),
General(IntType, Vec<Struct<'tcx>>),
/// Two cases distinguished by a nullable pointer: the case with discriminant
/// `nndiscr` must have single field which is known to be nonnull due to its type.
/// The other case is known to be zero sized. Hence we represent the enum
@ -194,57 +162,36 @@ pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
repr
}
fn dtor_to_init_u8(dtor: bool) -> u8 {
if dtor { 1 } else { 0 }
}
pub trait GetDtorType<'tcx> { fn dtor_type(self) -> Ty<'tcx>; }
impl<'a, 'tcx> GetDtorType<'tcx> for TyCtxt<'a, 'tcx, 'tcx> {
fn dtor_type(self) -> Ty<'tcx> { self.types.u8 }
}
fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Repr<'tcx> {
match t.sty {
ty::TyTuple(ref elems) => {
Univariant(mk_struct(cx, &elems[..], false, t), 0)
Univariant(mk_struct(cx, &elems[..], false, t))
}
ty::TyStruct(def, substs) => {
let mut ftys = def.struct_variant().fields.iter().map(|field| {
let ftys = def.struct_variant().fields.iter().map(|field| {
monomorphize::field_ty(cx.tcx(), substs, field)
}).collect::<Vec<_>>();
let packed = cx.tcx().lookup_packed(def.did);
// FIXME(16758) don't add a drop flag to unsized structs, as it
// won't actually be in the location we say it is because it'll be after
// the unsized field. Several other pieces of code assume that the unsized
// field is definitely the last one.
let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t);
if dtor {
ftys.push(cx.tcx().dtor_type());
}
Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor))
Univariant(mk_struct(cx, &ftys[..], packed, t))
}
ty::TyClosure(_, ref substs) => {
Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0)
Univariant(mk_struct(cx, &substs.upvar_tys, false, t))
}
ty::TyEnum(def, substs) => {
let cases = get_cases(cx.tcx(), def, substs);
let hint = *cx.tcx().lookup_repr_hints(def.did).get(0)
.unwrap_or(&attr::ReprAny);
let dtor = def.dtor_kind().has_drop_flag();
if cases.is_empty() {
// Uninhabitable; represent as unit
// (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny);
let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() };
return Univariant(mk_struct(cx, &ftys[..], false, t),
dtor_to_init_u8(dtor));
return Univariant(mk_struct(cx, &[], false, t));
}
if !dtor && cases.iter().all(|c| c.tys.is_empty()) {
if cases.iter().all(|c| c.tys.is_empty()) {
// All bodies empty -> intlike
let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect();
let bounds = IntBounds {
@ -266,13 +213,10 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
if cases.len() == 1 && hint == attr::ReprAny {
// Equivalent to a struct/tuple/newtype.
let mut ftys = cases[0].tys.clone();
if dtor { ftys.push(cx.tcx().dtor_type()); }
return Univariant(mk_struct(cx, &ftys[..], false, t),
dtor_to_init_u8(dtor));
return Univariant(mk_struct(cx, &cases[0].tys, false, t));
}
if !dtor && cases.len() == 2 && hint == attr::ReprAny {
if cases.len() == 2 && hint == attr::ReprAny {
// Nullable pointer optimization
let mut discr = 0;
while discr < 2 {
@ -315,7 +259,6 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let fields : Vec<_> = cases.iter().map(|c| {
let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity));
ftys.extend_from_slice(&c.tys);
if dtor { ftys.push(cx.tcx().dtor_type()); }
mk_struct(cx, &ftys, false, t)
}).collect();
@ -377,13 +320,12 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let fields : Vec<_> = cases.iter().map(|c| {
let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity));
ftys.extend_from_slice(&c.tys);
if dtor { ftys.push(cx.tcx().dtor_type()); }
mk_struct(cx, &ftys[..], false, t)
}).collect();
ensure_enum_fits_in_address_space(cx, &fields[..], t);
General(ity, fields, dtor_to_init_u8(dtor))
General(ity, fields)
}
_ => bug!("adt::represent_type called on non-ADT type: {}", t)
}
@ -681,9 +623,7 @@ fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
/// and fill in the actual contents in a second pass to prevent
/// unbounded recursion; see also the comments in `trans::type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
let c = generic_type_of(cx, r, None, false, false, false);
assert!(!c.needs_drop_flag);
c.prefix
generic_type_of(cx, r, None, false, false)
}
@ -692,25 +632,19 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
// are going to get the wrong type (it will not include the unsized parts of it).
pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>, dst: bool) -> Type {
let c = generic_type_of(cx, r, None, true, dst, false);
assert!(!c.needs_drop_flag);
c.prefix
}
pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>, dst: bool) -> TypeContext {
generic_type_of(cx, r, None, true, dst, true)
generic_type_of(cx, r, None, true, dst)
}
pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>, name: &str) -> Type {
let c = generic_type_of(cx, r, Some(name), false, false, false);
assert!(!c.needs_drop_flag);
c.prefix
generic_type_of(cx, r, Some(name), false, false)
}
pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>, llty: &mut Type) {
match *r {
CEnum(..) | General(..) | RawNullablePointer { .. } => { }
Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
Univariant(ref st) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
llty.set_struct_body(&struct_llfields(cx, st, false, false),
st.packed)
}
@ -720,50 +654,40 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>,
name: Option<&str>,
sizing: bool,
dst: bool,
delay_drop_flag: bool) -> TypeContext {
debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}",
r, name, sizing, dst, delay_drop_flag);
dst: bool) -> Type {
debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {}",
r, name, sizing, dst);
match *r {
CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)),
CEnum(ity, _, _) => ll_inttype(cx, ity),
RawNullablePointer { nnty, .. } =>
TypeContext::direct(type_of::sizing_type_of(cx, nnty)),
type_of::sizing_type_of(cx, nnty),
StructWrappedNullablePointer { nonnull: ref st, .. } => {
match name {
None => {
TypeContext::direct(
Type::struct_(cx, &struct_llfields(cx, st, sizing, dst),
st.packed))
Type::struct_(cx, &struct_llfields(cx, st, sizing, dst),
st.packed)
}
Some(name) => {
assert_eq!(sizing, false);
TypeContext::direct(Type::named_struct(cx, name))
Type::named_struct(cx, name)
}
}
}
Univariant(ref st, dtor_needed) => {
let dtor_needed = dtor_needed != 0;
Univariant(ref st) => {
match name {
None => {
let mut fields = struct_llfields(cx, st, sizing, dst);
if delay_drop_flag && dtor_needed {
fields.pop();
}
TypeContext::may_need_drop_flag(
Type::struct_(cx, &fields,
st.packed),
delay_drop_flag && dtor_needed)
let fields = struct_llfields(cx, st, sizing, dst);
Type::struct_(cx, &fields, st.packed)
}
Some(name) => {
// Hypothesis: named_struct's can never need a
// drop flag. (... needs validation.)
assert_eq!(sizing, false);
TypeContext::direct(Type::named_struct(cx, name))
Type::named_struct(cx, name)
}
}
}
General(ity, ref sts, dtor_needed) => {
let dtor_needed = dtor_needed != 0;
General(ity, ref sts) => {
// We need a representation that has:
// * The alignment of the most-aligned field
// * The size of the largest variant (rounded up to that alignment)
@ -795,25 +719,18 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
};
assert_eq!(machine::llalign_of_min(cx, fill_ty), align);
assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly
let mut fields: Vec<Type> =
let fields: Vec<Type> =
[discr_ty,
Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size),
fill_ty].iter().cloned().collect();
if delay_drop_flag && dtor_needed {
fields.pop();
}
match name {
None => {
TypeContext::may_need_drop_flag(
Type::struct_(cx, &fields[..], false),
delay_drop_flag && dtor_needed)
Type::struct_(cx, &fields[..], false)
}
Some(name) => {
let mut llty = Type::named_struct(cx, name);
llty.set_struct_body(&fields[..], false);
TypeContext::may_need_drop_flag(
llty,
delay_drop_flag && dtor_needed)
llty
}
}
}
@ -852,7 +769,7 @@ pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool {
match *r {
CEnum(ity, _, _) => ity.is_signed(),
General(ity, _, _) => ity.is_signed(),
General(ity, _) => ity.is_signed(),
Univariant(..) => false,
RawNullablePointer { .. } => false,
StructWrappedNullablePointer { .. } => false,
@ -869,7 +786,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
CEnum(ity, min, max) => {
load_discr(bcx, ity, scrutinee, min, max, range_assert)
}
General(ity, ref cases, _) => {
General(ity, ref cases) => {
let ptr = StructGEP(bcx, scrutinee, 0);
load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1),
range_assert)
@ -933,7 +850,7 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
CEnum(ity, _, _) => {
C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
}
General(ity, _, _) => {
General(ity, _) => {
C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
}
Univariant(..) => {
@ -957,11 +874,11 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
val);
}
General(ity, _, _) => {
General(ity, _) => {
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
StructGEP(bcx, val, 0));
}
Univariant(_, _) => {
Univariant(_) => {
assert_eq!(discr, Disr(0));
}
RawNullablePointer { nndiscr, nnty, ..} => {
@ -1012,11 +929,11 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
CEnum(..) => {
bug!("element access in C-like enum")
}
Univariant(ref st, _dtor) => {
Univariant(ref st) => {
assert_eq!(discr, Disr(0));
struct_field_ptr(bcx, st, val, ix, false)
}
General(_, ref cases, _) => {
General(_, ref cases) => {
struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true)
}
RawNullablePointer { nndiscr, ref nullfields, .. } |
@ -1170,7 +1087,7 @@ pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr
assert_discr_in_range(ity, min, max, discr);
C_integral(ll_inttype(ccx, ity), discr.0, true)
}
General(ity, ref cases, _) => {
General(ity, ref cases) => {
let case = &cases[discr.0 as usize];
let (max_sz, _) = union_size_and_align(&cases[..]);
let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true);
@ -1180,7 +1097,7 @@ pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr
contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]);
C_struct(ccx, &contents[..], false)
}
Univariant(ref st, _dro) => {
Univariant(ref st) => {
assert_eq!(discr, Disr(0));
let contents = build_const_struct(ccx, st, vals);
C_struct(ccx, &contents[..], st.packed)

View File

@ -57,8 +57,8 @@ use callee::{Callee};
use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint};
use collector::{self, TransItemCollectionMode};
use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
use common::{CrateContext, Field, FunctionContext};
use common::{Result, VariantInfo};
use common::{CrateContext, FunctionContext};
use common::{Result};
use common::{fulfill_obligation};
use common::{type_is_zero_size, val_ty};
use common;
@ -76,7 +76,6 @@ use partitioning::{self, PartitioningStrategy, CodegenUnit};
use symbol_map::SymbolMap;
use symbol_names_test;
use trans_item::TransItem;
use tvec;
use type_::Type;
use type_of;
use value::Value;
@ -386,155 +385,6 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
}
// Iterates through the elements of a structural type.
pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
av: ValueRef,
t: Ty<'tcx>,
mut f: F)
-> Block<'blk, 'tcx>
where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
{
let _icx = push_ctxt("iter_structural_ty");
fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
repr: &adt::Repr<'tcx>,
av: adt::MaybeSizedValue,
variant: ty::VariantDef<'tcx>,
substs: &Substs<'tcx>,
f: &mut F)
-> Block<'blk, 'tcx>
where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
{
let _icx = push_ctxt("iter_variant");
let tcx = cx.tcx();
let mut cx = cx;
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
cx = f(cx,
adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i),
arg);
}
return cx;
}
let value = if common::type_is_sized(cx.tcx(), t) {
adt::MaybeSizedValue::sized(av)
} else {
let data = Load(cx, get_dataptr(cx, av));
let info = Load(cx, get_meta(cx, av));
adt::MaybeSizedValue::unsized_(data, info)
};
let mut cx = cx;
match t.sty {
ty::TyStruct(..) => {
let repr = adt::represent_type(cx.ccx(), t);
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i);
let val = if common::type_is_sized(cx.tcx(), field_ty) {
llfld_a
} else {
let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter");
Store(cx, llfld_a, get_dataptr(cx, scratch));
Store(cx, value.meta, get_meta(cx, scratch));
scratch
};
cx = f(cx, val, field_ty);
}
}
ty::TyClosure(_, ref substs) => {
let repr = adt::represent_type(cx.ccx(), t);
for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
cx = f(cx, llupvar, upvar_ty);
}
}
ty::TyArray(_, n) => {
let base = get_dataptr(cx, value.value);
let len = C_uint(cx.ccx(), n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f);
}
ty::TyTuple(ref args) => {
let repr = adt::represent_type(cx.ccx(), t);
for (i, arg) in args.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
cx = f(cx, llfld_a, *arg);
}
}
ty::TyEnum(en, substs) => {
let fcx = cx.fcx;
let ccx = fcx.ccx;
let repr = adt::represent_type(ccx, t);
let n_variants = en.variants.len();
// NB: we must hit the discriminant first so that structural
// comparison knows not to proceed when the discriminants differ.
match adt::trans_switch(cx, &repr, av, false) {
(adt::BranchKind::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
&en.variants[0], substs, &mut f);
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
// we do **not** use an Unreachable instruction here, even
// though most of the time this basic block will never be hit.
//
// When an enum is dropped, its contents are currently
// overwritten to DTOR_DONE, which means the discriminant
// could have changed value to something not within the actual
// range of the discriminant. Currently this function is only
// used for drop glue so in this case we just return quickly
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let ret_void_cx = fcx.new_block("enum-iter-ret-void");
RetVoid(ret_void_cx, DebugLoc::None);
let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
let next_cx = fcx.new_block("enum-iter-next");
for variant in &en.variants {
let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}",
&variant.disr_val
.to_string()));
let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
AddCase(llswitch, case_val, variant_cx.llbb);
let variant_cx = iter_variant(variant_cx,
&repr,
value,
variant,
substs,
&mut f);
Br(variant_cx, next_cx.llbb, DebugLoc::None);
}
cx = next_cx;
}
_ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"),
}
}
_ => {
cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
}
}
return cx;
}
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
@ -626,12 +476,12 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let src_repr = adt::represent_type(bcx.ccx(), src_ty);
let src_fields = match &*src_repr {
&adt::Repr::Univariant(ref s, _) => &s.fields,
&adt::Repr::Univariant(ref s) => &s.fields,
_ => bug!("struct has non-univariant repr"),
};
let dst_repr = adt::represent_type(bcx.ccx(), dst_ty);
let dst_fields = match &*dst_repr {
&adt::Repr::Univariant(ref s, _) => &s.fields,
&adt::Repr::Univariant(ref s) => &s.fields,
_ => bug!("struct has non-univariant repr"),
};

View File

@ -783,19 +783,6 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
C_integral(Type::i64(ccx), i, false)
}
pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
let v = i.as_i64();
let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
if bit_size < 64 {
// make sure it doesn't overflow
assert!(v < (1<<(bit_size-1)) && v >= -(1<<(bit_size-1)));
}
C_integral(ccx.int_type(), v as u64, true)
}
pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
let v = i.as_u64();

View File

@ -1247,7 +1247,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
-> Vec<MemberDescription> {
let adt = &self.enum_type.ty_adt_def().unwrap();
match *self.type_rep {
adt::General(_, ref struct_defs, _) => {
adt::General(_, ref struct_defs) => {
let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
.expect(""));
struct_defs
@ -1281,7 +1281,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
}
}).collect()
},
adt::Univariant(ref struct_def, _) => {
adt::Univariant(ref struct_def) => {
assert!(adt.variants.len() <= 1);
if adt.variants.is_empty() {
@ -1631,7 +1631,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
adt::RawNullablePointer { .. } |
adt::StructWrappedNullablePointer { .. } |
adt::Univariant(..) => None,
adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)),
adt::General(inttype, _) => Some(discriminant_type_metadata(inttype)),
};
let enum_llvm_type = type_of::type_of(cx, enum_type);

View File

@ -29,9 +29,11 @@ use debuginfo::DebugLoc;
use machine::*;
use monomorphize;
use trans_item::TransItem;
use tvec;
use type_of::{type_of, sizing_type_of, align_of};
use type_::Type;
use value::Value;
use Disr;
use arena::TypedArena;
use syntax_pos::DUMMY_SP;
@ -343,10 +345,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// Don't use type_of::sizing_type_of because that expects t to be sized.
assert!(!t.is_simd());
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_context_of(ccx, &repr, true);
let sizing_type = adt::sizing_type_of(ccx, &repr, true);
debug!("DST {} sizing_type: {:?}", t, sizing_type);
let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
let sized_align = llalign_of_min(ccx, sizing_type.prefix());
let sized_size = llsize_of_alloc(ccx, sizing_type);
let sized_align = llalign_of_min(ccx, sizing_type);
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
@ -366,15 +368,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let mut size = bcx.add(sized_size, unsized_size);
// Issue #27023: If there is a drop flag, *now* we add 1
// to the size. (We can do this without adding any
// padding because drop flags do not have any alignment
// constraints.)
if sizing_type.needs_drop_flag() {
size = bcx.add(size, C_uint(bcx.ccx(), 1_u64));
}
let size = bcx.add(sized_size, unsized_size);
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
@ -471,17 +465,6 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
}
}
ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
match (def.dtor_kind(), skip_dtor) {
(ty::TraitDtor(_), false) => {
trans_struct_drop(bcx, t, v0)
}
(ty::NoDtor, _) | (_, true) => {
// No dtor? Just the default case
iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
}
}
}
ty::TyTrait(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
@ -496,15 +479,159 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
DebugLoc::None);
bcx
}
ty::TyStruct(def, _) | ty::TyEnum(def, _)
if def.dtor_kind().is_present() && !skip_dtor => {
trans_struct_drop(bcx, t, v0)
}
_ => {
if bcx.fcx.type_needs_drop(t) {
iter_structural_ty(bcx,
v0,
t,
|bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
drop_structural_ty(bcx, v0, t)
} else {
bcx
}
}
}
}
// Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
av: ValueRef,
t: Ty<'tcx>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_structural_ty");
fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
repr: &adt::Repr<'tcx>,
av: adt::MaybeSizedValue,
variant: ty::VariantDef<'tcx>,
substs: &Substs<'tcx>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("iter_variant");
let tcx = cx.tcx();
let mut cx = cx;
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
cx = drop_ty(cx,
adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i),
arg, DebugLoc::None);
}
return cx;
}
let value = if type_is_sized(cx.tcx(), t) {
adt::MaybeSizedValue::sized(av)
} else {
let data = Load(cx, get_dataptr(cx, av));
let info = Load(cx, get_meta(cx, av));
adt::MaybeSizedValue::unsized_(data, info)
};
let mut cx = cx;
match t.sty {
ty::TyStruct(..) => {
let repr = adt::represent_type(cx.ccx(), t);
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i);
let val = if type_is_sized(cx.tcx(), field_ty) {
llfld_a
} else {
let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter");
Store(cx, llfld_a, get_dataptr(cx, scratch));
Store(cx, value.meta, get_meta(cx, scratch));
scratch
};
cx = drop_ty(cx, val, field_ty, DebugLoc::None);
}
}
ty::TyClosure(_, ref substs) => {
let repr = adt::represent_type(cx.ccx(), t);
for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None);
}
}
ty::TyArray(_, n) => {
let base = get_dataptr(cx, value.value);
let len = C_uint(cx.ccx(), n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta,
|bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
}
ty::TyTuple(ref args) => {
let repr = adt::represent_type(cx.ccx(), t);
for (i, arg) in args.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None);
}
}
ty::TyEnum(en, substs) => {
let fcx = cx.fcx;
let ccx = fcx.ccx;
let repr = adt::represent_type(ccx, t);
let n_variants = en.variants.len();
// NB: we must hit the discriminant first so that structural
// comparison knows not to proceed when the discriminants differ.
match adt::trans_switch(cx, &repr, av, false) {
(adt::BranchKind::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
&en.variants[0], substs);
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None);
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
// we do **not** use an Unreachable instruction here, even
// though most of the time this basic block will never be hit.
//
// When an enum is dropped, its contents are currently
// overwritten to DTOR_DONE, which means the discriminant
// could have changed value to something not within the actual
// range of the discriminant. Currently this function is only
// used for drop glue so in this case we just return quickly
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let ret_void_cx = fcx.new_block("enum-iter-ret-void");
RetVoid(ret_void_cx, DebugLoc::None);
let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
let next_cx = fcx.new_block("enum-iter-next");
for variant in &en.variants {
let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}",
&variant.disr_val
.to_string()));
let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
AddCase(llswitch, case_val, variant_cx.llbb);
let variant_cx = iter_variant(variant_cx,
&repr,
value,
variant,
substs);
Br(variant_cx, next_cx.llbb, DebugLoc::None);
}
cx = next_cx;
}
_ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
}
}
_ => {
cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
}
}
return cx;
}

View File

@ -206,9 +206,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, "type_id") => {
C_u64(ccx, ccx.tcx().type_id_hash(substs.types[0]))
}
(_, "init_dropped") => {
span_bug!(span, "init_dropped intrinsic unsupported");
}
(_, "init") => {
let tp_ty = substs.types[0];
if !type_is_zero_size(ccx, tp_ty) {

View File

@ -100,7 +100,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let size = C_uint(bcx.ccx(), size);
let base = get_dataptr(&bcx, dest.llval);
let bcx = bcx.map_block(|block| {
tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| {
self.store_operand_direct(block, llslot, tr_elem);
block
})

View File

@ -18,59 +18,46 @@ use common::*;
use debuginfo::DebugLoc;
use rustc::ty::Ty;
pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_raw");
let _icx = push_ctxt("tvec::slice_for_each");
let fcx = bcx.fcx;
if type_is_zero_size(bcx.ccx(), unit_ty) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
if bcx.unreachable.get() {
return bcx;
}
let loop_bcx = fcx.new_block("expr_repeat");
let next_bcx = fcx.new_block("expr_repeat: next");
Br(bcx, loop_bcx.llbb, DebugLoc::None);
let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
&[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
let bcx = loop_bcx;
let bcx = f(bcx, data_ptr, unit_ty);
let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
let cond_val = ICmp(bcx, llvm::IntULT, plusone, len, DebugLoc::None);
CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
next_bcx
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx(), unit_ty);
let add = |bcx, a, b| if zst {
Add(bcx, a, b, DebugLoc::None)
} else {
// Calculate the last pointer address we want to handle.
let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
InBoundsGEP(bcx, a, &[b])
};
// Now perform the iteration.
let header_bcx = fcx.new_block("iter_vec_loop_header");
Br(bcx, header_bcx.llbb, DebugLoc::None);
let data_ptr =
Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
let not_yet_at_end =
ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
let body_bcx = fcx.new_block("iter_vec_loop_body");
let next_bcx = fcx.new_block("iter_vec_next");
CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
let body_bcx = f(body_bcx, data_ptr, unit_ty);
AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
&[C_int(bcx.ccx(), 1)]),
body_bcx.llbb);
Br(body_bcx, header_bcx.llbb, DebugLoc::None);
next_bcx
}
let header_bcx = fcx.new_block("slice_loop_header");
let body_bcx = fcx.new_block("slice_loop_body");
let next_bcx = fcx.new_block("slice_loop_next");
let start = if zst {
C_uint(bcx.ccx(), 0 as usize)
} else {
data_ptr
};
let end = add(bcx, start, len);
Br(bcx, header_bcx.llbb, DebugLoc::None);
let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]);
let keep_going =
ICmp(header_bcx, llvm::IntULT, current, end, DebugLoc::None);
CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
let body_bcx = f(body_bcx, if zst { data_ptr } else { current });
let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize));
AddIncomingToPhi(current, next, body_bcx.llbb);
Br(body_bcx, header_bcx.llbb, DebugLoc::None);
next_bcx
}
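For orientation, here is a rough Rust-level sketch of the loop shape that `slice_for_each` builds with the Phi/ICmp/CondBr calls above (the function and variable names are made up and this is not code from the commit): zero-sized element types are counted by index so the data pointer never goes out of bounds, while sized types walk a pointer from the start of the slice to one-past-the-end.

// Illustrative sketch only; it mirrors the control flow built above.
unsafe fn slice_for_each_sketch<T, F: FnMut(*mut T)>(data: *mut T, len: usize, mut f: F) {
    let zst = std::mem::size_of::<T>() == 0;
    // "end" is only meaningful for sized element types; for ZSTs the bound
    // is the index `len`, matching the integer Add used by the builder code.
    let end = data.offset(len as isize);
    let mut index = 0usize; // loop "current" in the ZST case
    let mut ptr = data;     // loop "current" in the sized case
    loop {
        let keep_going = if zst { index < len } else { ptr < end };
        if !keep_going {
            return;
        }
        // ZSTs always hand the original pointer to the callback, as above.
        f(if zst { data } else { ptr });
        if zst { index += 1; } else { ptr = ptr.offset(1); }
    }
}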

View File

@ -122,7 +122,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
], ccx.tcx.types.usize)
}
"rustc_peek" => (1, vec![param(ccx, 0)], param(ccx, 0)),
"init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)),
"init" => (1, Vec::new(), param(ccx, 0)),
"uninit" => (1, Vec::new(), param(ccx, 0)),
"forget" => (1, vec!( param(ccx, 0) ), tcx.mk_nil()),
"transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)),

View File

@ -59,7 +59,7 @@ const EMPTY_BUCKET: u64 = 0;
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general is just a tricked out `Vec<Option<u64, K, V>>`.
#[unsafe_no_drop_flag]
#[cfg_attr(stage0, unsafe_no_drop_flag)]
pub struct RawTable<K, V> {
capacity: usize,
size: usize,
@ -1042,7 +1042,7 @@ impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
impl<K, V> Drop for RawTable<K, V> {
#[unsafe_destructor_blind_to_params]
fn drop(&mut self) {
if self.capacity == 0 || self.capacity == mem::POST_DROP_USIZE {
if self.capacity == 0 {
return;
}

View File

@ -275,7 +275,7 @@
#![feature(unboxed_closures)]
#![feature(unicode)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![feature(unwind_attributes)]
#![feature(vec_push_all)]
#![feature(zero_one)]

View File

@ -164,10 +164,6 @@ declare_features! (
// Allows using `box` in patterns; RFC 469
(active, box_patterns, "1.0.0", Some(29641)),
// Allows using the unsafe_no_drop_flag attribute (unlikely to
// switch to Accepted; see RFC 320)
(active, unsafe_no_drop_flag, "1.0.0", None),
// Allows using the unsafe_destructor_blind_to_params attribute;
// RFC 1238
(active, dropck_parametricity, "1.3.0", Some(28498)),
@ -300,7 +296,8 @@ declare_features! (
(removed, quad_precision_float, "1.0.0", None),
(removed, struct_inherit, "1.0.0", None),
(removed, test_removed_feature, "1.0.0", None),
(removed, visible_private_types, "1.0.0", None)
(removed, visible_private_types, "1.0.0", None),
(removed, unsafe_no_drop_flag, "1.0.0", None)
);
declare_features! (
@ -565,10 +562,6 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat
attribute is just used for the Rust test \
suite",
cfg_fn!(omit_gdb_pretty_printer_section))),
("unsafe_no_drop_flag", Whitelisted, Gated("unsafe_no_drop_flag",
"unsafe_no_drop_flag has unstable semantics \
and may be removed in the future",
cfg_fn!(unsafe_no_drop_flag))),
("unsafe_destructor_blind_to_params",
Normal,
Gated("dropck_parametricity",

View File

@ -26,7 +26,6 @@
#![feature(associated_consts)]
#![feature(const_fn)]
#![feature(filling_drop)]
#![feature(libc)]
#![feature(rustc_private)]
#![feature(staged_api)]

View File

@ -39,7 +39,7 @@
use std::fmt::{self, Display, Debug};
use std::iter::FromIterator;
use std::ops::Deref;
use std::{ptr, slice, vec};
use std::{mem, ptr, slice, vec};
use serialize::{Encodable, Decodable, Encoder, Decoder};
@ -74,12 +74,22 @@ impl<T: 'static> P<T> {
pub fn map<F>(mut self, f: F) -> P<T> where
F: FnOnce(T) -> T,
{
let p: *mut T = &mut *self.ptr;
// Leak self in case of panic.
// FIXME(eddyb) Use some sort of "free guard" that
// only deallocates, without dropping the pointee,
// in case the call to `f` below ends in a panic.
mem::forget(self);
unsafe {
let p = &mut *self.ptr;
// FIXME(#5016) this shouldn't need to drop-fill to be safe.
ptr::write(p, f(ptr::read_and_drop(p)));
ptr::write(p, f(ptr::read(p)));
// Recreate self from the raw pointer.
P {
ptr: Box::from_raw(p)
}
}
self
}
}
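The leak-on-panic comment above captures the safety argument: `ptr::read(p)` moves the value out of the allocation, so if `f` panicked while `self` were still live, unwinding would drop the moved-out pointee a second time; forgetting `self` up front trades that double drop for a leak. The "free guard" the FIXME asks for might look roughly like the hypothetical sketch below (not code from this commit; ManuallyDrop is used here only as one way to deallocate without dropping).

// Hypothetical sketch of a free guard: if `f` panics, the guard frees the
// allocation without running T's destructor; on success it is disarmed.
struct FreeGuard<T> {
    ptr: *mut T,
    armed: bool,
}

impl<T> Drop for FreeGuard<T> {
    fn drop(&mut self) {
        if self.armed {
            unsafe {
                // ManuallyDrop<T> shares T's layout but has no drop glue,
                // so this deallocates without touching the moved-out value.
                drop(Box::from_raw(self.ptr as *mut std::mem::ManuallyDrop<T>));
            }
        }
    }
}

// In `map`, such a guard would wrap `p` before calling `f` and be disarmed
// once `ptr::write` succeeds, replacing the unconditional leak on panic.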

View File

@ -1,61 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check we reject structs that mix a `Drop` impl with `#[repr(C)]`.
//
// As a special case, also check that we do not warn on such structs
// if they also are declared with `#[unsafe_no_drop_flag]`
#![feature(unsafe_no_drop_flag)]
#![deny(drop_with_repr_extern)]
//~^ NOTE lint level defined here
//~| NOTE lint level defined here
#[repr(C)] struct As { x: Box<i8> }
#[repr(C)] enum Ae { Ae(Box<i8>), _None }
struct Bs { x: Box<i8> }
enum Be { Be(Box<i8>), _None }
#[repr(C)] struct Cs { x: Box<i8> }
//~^ NOTE the `#[repr(C)]` attribute is attached here
impl Drop for Cs { fn drop(&mut self) { } }
//~^ ERROR implementing Drop adds hidden state to types, possibly conflicting with `#[repr(C)]`
#[repr(C)] enum Ce { Ce(Box<i8>), _None }
//~^ NOTE the `#[repr(C)]` attribute is attached here
impl Drop for Ce { fn drop(&mut self) { } }
//~^ ERROR implementing Drop adds hidden state to types, possibly conflicting with `#[repr(C)]`
#[unsafe_no_drop_flag]
#[repr(C)] struct Ds { x: Box<i8> }
impl Drop for Ds { fn drop(&mut self) { } }
#[unsafe_no_drop_flag]
#[repr(C)] enum De { De(Box<i8>), _None }
impl Drop for De { fn drop(&mut self) { } }
fn main() {
let a = As { x: Box::new(3) };
let b = Bs { x: Box::new(3) };
let c = Cs { x: Box::new(3) };
let d = Ds { x: Box::new(3) };
println!("{:?}", (*a.x, *b.x, *c.x, *d.x));
let _a = Ae::Ae(Box::new(3));
let _b = Be::Be(Box::new(3));
let _c = Ce::Ce(Box::new(3));
let _d = De::De(Box::new(3));
}

View File

@ -1,23 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub struct T;
#[unsafe_no_drop_flag]
//~^ ERROR unsafe_no_drop_flag has unstable semantics and may be removed
pub struct S {
pub x: T,
}
impl Drop for S {
fn drop(&mut self) {}
}
pub fn main() {}

View File

@ -8,9 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_no_drop_flag)]
#[unsafe_no_drop_flag]
pub struct ZeroLengthThingWithDestructor;
impl Drop for ZeroLengthThingWithDestructor {
fn drop(&mut self) {}

View File

@ -1,68 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z force-dropflag-checks=on
// ignore-emscripten
// Quick-and-dirty test to ensure -Z force-dropflag-checks=on works as
// expected. Note that the inlined drop-flag is slated for removal
// (RFC 320); when that happens, the -Z flag and this test should
// simply be removed.
//
// See also drop-flag-skip-sanity-check.rs.
use std::env;
use std::process::Command;
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "test" {
return test();
}
let mut p = Command::new(&args[0]).arg("test").spawn().unwrap();
// The invocation should fail due to the drop-flag sanity check.
assert!(!p.wait().unwrap().success());
}
#[derive(Debug)]
struct Corrupted {
x: u8
}
impl Drop for Corrupted {
fn drop(&mut self) { println!("dropping"); }
}
fn test() {
{
let mut c1 = Corrupted { x: 1 };
let mut c2 = Corrupted { x: 2 };
unsafe {
let p1 = &mut c1 as *mut Corrupted as *mut u8;
let p2 = &mut c2 as *mut Corrupted as *mut u8;
for i in 0..std::mem::size_of::<Corrupted>() {
// corrupt everything, *including* the drop flag.
//
// (We corrupt via two different means to safeguard
// against the hypothetical assignment of the
// dtor_needed/dtor_done values to v and v+k that
// happen to match with one of the corruption values
// below.)
*p1.offset(i as isize) += 2;
*p2.offset(i as isize) += 3;
}
}
// Here, at the end of the scope of `c1` and `c2`, the
// drop-glue should detect the corruption of (at least one of)
// the drop-flags.
}
println!("We should never get here.");
}

View File

@ -1,67 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z force-dropflag-checks=off
// ignore-emscripten no threads support
// Quick-and-dirty test to ensure -Z force-dropflag-checks=off works as
// expected. Note that the inlined drop-flag is slated for removal
// (RFC 320); when that happens, the -Z flag and this test should
// simply be removed.
//
// See also drop-flag-sanity-check.rs.
use std::env;
use std::process::Command;
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "test" {
return test();
}
let s = Command::new(&args[0]).arg("test").status().unwrap();
// Invocation should succeed as the drop-flag sanity check is skipped.
assert!(s.success());
}
#[derive(Debug)]
struct Corrupted {
x: u8
}
impl Drop for Corrupted {
fn drop(&mut self) { println!("dropping"); }
}
fn test() {
{
let mut c1 = Corrupted { x: 1 };
let mut c2 = Corrupted { x: 2 };
unsafe {
let p1 = &mut c1 as *mut Corrupted as *mut u8;
let p2 = &mut c2 as *mut Corrupted as *mut u8;
for i in 0..std::mem::size_of::<Corrupted>() {
// corrupt everything, *including* the drop flag.
//
// (We corrupt via two different means to safeguard
// against the hypothetical assignment of the
// dtor_needed/dtor_done values to v and v+k that
// happen to match with one of the corruption values
// below.)
*p1.offset(i as isize) += 2;
*p2.offset(i as isize) += 3;
}
}
// Here, at the end of the scope of `c1` and `c2`, the
// drop-glue should detect the corruption of (at least one of)
// the drop-flags.
}
}

View File

@ -55,15 +55,6 @@ pub fn main() {
// compiler is hidden.
rusti::move_val_init(&mut y, x);
// In particular, it may be tracked via a drop-flag embedded
// in the value, or via a null pointer, or via
// mem::POST_DROP_USIZE, or (most preferably) via a
// stack-local drop flag.
//
// (This test used to build-in knowledge of how it was
// tracked, and check that the underlying stack slot had been
// set to `mem::POST_DROP_USIZE`.)
// But what we *can* observe is how many times the destructor
// for `D` is invoked, and what the last value we saw was
// during such a destructor call. We do so after the end of

View File

@ -8,12 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_no_drop_flag)]
static mut drop_count: usize = 0;
#[unsafe_no_drop_flag]
struct Foo {
dropped: bool
}

View File

@ -26,11 +26,11 @@ impl Drop for Kitty {
#[cfg(target_pointer_width = "64")]
pub fn main() {
assert_eq!(mem::size_of::<Cat>(), 8 as usize);
assert_eq!(mem::size_of::<Kitty>(), 16 as usize);
assert_eq!(mem::size_of::<Kitty>(), 8 as usize);
}
#[cfg(target_pointer_width = "32")]
pub fn main() {
assert_eq!(mem::size_of::<Cat>(), 4 as usize);
assert_eq!(mem::size_of::<Kitty>(), 8 as usize);
assert_eq!(mem::size_of::<Kitty>(), 4 as usize);
}

View File

@ -1,27 +0,0 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that MIR trans is used for functions from other crates.
#![feature(unsafe_no_drop_flag)]
#[unsafe_no_drop_flag]
struct Foo;
impl Drop for Foo {
fn drop(&mut self) {
panic!("MIR trans is not enabled for mem::forget");
}
}
fn main() {
let x = Foo;
std::mem::forget(x);
}

View File

@ -1,4 +1,4 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -8,11 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_no_drop_flag)]
use std::mem::size_of;
#[unsafe_no_drop_flag]
struct Test<T> {
a: T
}

View File

@ -8,14 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_no_drop_flag)]
// ignore-pretty : (#23623) problems when ending with // comments
static mut destructions : isize = 3;
pub fn foo() {
#[unsafe_no_drop_flag]
struct Foo;
impl Drop for Foo {